/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "film.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>

#include "i18n.h"

using std::cout;
using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using std::map;
using std::shared_ptr;
using boost::is_any_of;
using boost::split;
using boost::optional;
using std::dynamic_pointer_cast;
using dcp::Size;
using namespace dcpomatic;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _have_current_subtitle (false)
{
        if (c->video && c->video->use()) {
                video.reset (new VideoDecoder (this, c));
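                /* Work out the offset to apply to timestamps so that the content
                   effectively starts at time 0; the source's streams will often
                   begin at some non-zero PTS.
                */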
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is, it just needs to be black */
                _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
                _black_image->make_black ();
        } else {
                _pts_offset = ContentTime ();
        }

        if (c->audio) {
                audio.reset (new AudioDecoder (this, c->audio, fast));
        }

        if (c->only_text()) {
                /* XXX: this time here should be the time of the first subtitle, not 0 */
                text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
        }

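        /* One entry per stream: the time we expect the next frame on that stream
           to have, used to synthesise timestamps for frames that arrive without
           a PTS (see decode_audio_packet).
        */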
        _next_time.resize (_format_context->nb_streams);
}

void
FFmpegDecoder::flush ()
{
        /* Get any remaining frames */

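        /* Sending a packet with null data puts the old-API decoders into
           `draining' mode, so that they give up any frames they are still buffering.
        */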
        _packet.data = 0;
        _packet.size = 0;

        /* XXX: should we reset _packet.data and size after each *_decode_* call? */

        while (video && decode_video_packet()) {}

        if (audio) {
                decode_audio_packet ();
        }

        /* Make sure all streams are the same length and round up to the next video frame */

        FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                Frame const f = full_length.frames_round (vfr);
                Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
                while (v < f) {
                        video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
                        ++v;
                }
        }

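        /* Pad each audio stream up to the full content length with silence,
           emitted in blocks of up to 0.1s.
        */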
        for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
                ContentTime a = audio->stream_position(film(), i);
                /* Unfortunately if a is 0 that really means that we don't know the stream position since
                   there has been no data on it since the last seek.  In this case we'll just do nothing
                   here.  I'm not sure if that's the right idea.
                */
                if (a > ContentTime()) {
                        while (a < full_length) {
                                ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a, true);
                                a += to_do;
                        }
                }
        }

        if (audio) {
                audio->flush ();
        }
}

bool
FFmpegDecoder::pass ()
{
        int r = av_read_frame (_format_context, &_packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                flush ();
                return true;
        }

        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

        if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_subtitle_packet ();
        } else {
                decode_audio_packet ();
        }

        av_packet_unref (&_packet);
        return false;
}

/** Deinterleave the audio in _frame and convert it to float.
 *  _frame->data is an array of pointers to buffers; only the first buffer is used
 *  for non-planar (interleaved) formats, otherwise there is one buffer per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

DCPOMATIC_DISABLE_WARNINGS
        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );
DCPOMATIC_ENABLE_WARNINGS

        /* XXX: can't we just use _frame->nb_samples directly here? */
        /* XXX: can't we use swr_convert() to do the format conversion? */

        /* Deinterleave and convert to float */

        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
        int const channels = stream->channels();
        int const frames = total_samples / channels;
        shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
        float** data = audio->data();

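        /* Each integer format below is normalised to float in [-1, 1); interleaved
           formats are split out sample-by-sample, planar (P) formats channel-by-channel.
        */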
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
                uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 samples are unsigned and centred on 128, so shift to signed before scaling */
                        data[channel][sample] = (static_cast<int>(*p++) - 128) / static_cast<float>(1 << 7);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (_frame->data);
                DCPOMATIC_ASSERT (_frame->channels <= channels);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
                for (int i = _frame->channels; i < channels; ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
        }

        return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
DCPOMATIC_DISABLE_WARNINGS
        return stream->stream (_format_context)->codec->sample_fmt;
DCPOMATIC_ENABLE_WARNINGS
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                DCPOMATIC_ASSERT (_ffmpeg_content->audio);
                shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        ContentTime u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
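        /* av_seek_frame wants a timestamp in the units of the chosen stream's
           time_base, so convert our time in seconds accordingly.
        */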
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        {
                /* Force re-creation of filter graphs to reset them and hence to make sure
                   they don't have any pre-seek frames knocking about.
                */
                boost::mutex::scoped_lock lm (_filter_graphs_mutex);
                _filter_graphs.clear ();
        }

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

DCPOMATIC_DISABLE_WARNINGS
        for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (i->stream(_format_context)->codec);
        }
DCPOMATIC_ENABLE_WARNINGS

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;

        for (auto& i: _next_time) {
                i = optional<ContentTime>();
        }
}

void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

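        /* Take a shallow copy of the packet so that we can advance its data pointer
           and shrink its size as the decoder consumes it, leaving _packet untouched.
        */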
        AVPacket copy_packet = _packet;
        int const stream_index = copy_packet.stream_index;

        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
        while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }

DCPOMATIC_DISABLE_WARNINGS
        while (copy_packet.size > 0) {

                int frame_finished;
                int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
                           some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
                           if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
                           even in the face of such an error, so I think we should too.

                           Returning from the method here caused mantis #352.
                        */
                        LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

                        /* Fudge decode_result so that we come out of the while loop when
                           we've processed this data.
                        */
                        decode_result = copy_packet.size;
                }

                if (frame_finished) {
                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

                        ContentTime ct;
                        if (_frame->pts == AV_NOPTS_VALUE) {
                                /* In some streams we see not every frame coming through with a timestamp; for those
                                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                                   particularly noticeable with TrueHD streams (see #1111).
                                */
                                if (_next_time[stream_index]) {
                                        ct = *_next_time[stream_index];
                                }
                        } else {
                                ct = ContentTime::from_seconds (
                                        av_frame_get_best_effort_timestamp (_frame) *
                                        av_q2d ((*stream)->stream (_format_context)->time_base))
                                        + _pts_offset;
                        }

                        _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());

                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                                data->move (data->frames() - remove, remove, 0);
                                data->set_frames (data->frames() - remove);
                                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
                        }

                        if (ct < ContentTime()) {
                                LOG_WARNING (
                                        "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
                                        to_string(ct),
                                        data->frames(),
                                        copy_packet.stream_index,
                                        copy_packet.pts,
                                        av_frame_get_best_effort_timestamp(_frame),
                                        av_q2d((*stream)->stream(_format_context)->time_base),
                                        to_string(_pts_offset)
                                        );
                        }
DCPOMATIC_ENABLE_WARNINGS

                        /* Give this data provided there is some, and its time is sane */
                        if (ct >= ContentTime() && data->frames() > 0) {
                                audio->emit (film(), *stream, data, ct);
                        }
                }

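                /* Advance past the bytes this call consumed; the old decode API is
                   allowed to take less than the whole packet at once.
                */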
                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

bool
FFmpegDecoder::decode_video_packet ()
{
        DCPOMATIC_ASSERT (_video_stream);

        int frame_finished;
DCPOMATIC_DISABLE_WARNINGS
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }
DCPOMATIC_ENABLE_WARNINGS

        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

        shared_ptr<VideoFilterGraph> graph;

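        /* Re-use an existing filter graph that can handle this frame's size and
           pixel format, if we have one; otherwise set up a new one.
        */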
        list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
                graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
                graph = *i;
        }

        list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = i->first;

                if (i->second != AV_NOPTS_VALUE) {
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();

                        video->emit (
                                film(),
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }

        return true;
}

void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
                } else {
                        only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects <= 0) {
                /* Nothing new in this subtitle */
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTime from = sub_period.from + _pts_offset;
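        /* If we don't know when this subtitle finishes, note that it is still open
           so that the next subtitle packet can close it.
        */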
        if (sub_period.to) {
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                _current_subtitle_to = optional<ContentTime>();
                _have_current_subtitle = true;
        }

        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                AVSubtitleRect const * rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        decode_bitmap_subtitle (rect, from);
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        decode_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}

void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1].
           (first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        uint8_t* out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint8_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        RGBA const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
        DCPOMATIC_ASSERT (target_width);
        DCPOMATIC_ASSERT (target_height);
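        /* Express the rectangle as proportions of the video frame, so that it can
           be scaled to whatever size the subtitle is eventually rendered at.
        */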
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / target_width,
                static_cast<double> (rect->y) / target_height,
                static_cast<double> (rect->w) / target_width,
                static_cast<double> (rect->h) / target_height
                );

        only_text()->emit_bitmap_start (from, image, scaled_rect);
}

void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */

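        /* A Dialogue: line has nine comma-separated fields (Layer, Start, End, Style,
           Name, MarginL, MarginR, MarginV, Effect) before the text, so everything
           after the ninth comma is the subtitle text itself.
        */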
        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
                base,
                text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height
                );

        for (auto const& i: sub::collect<list<sub::Subtitle> > (raw)) {
                only_text()->emit_plain_start (from, i);
        }
}