/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "film.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>

#include "i18n.h"

using std::cout;
using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using std::map;
using boost::shared_ptr;
using boost::is_any_of;
using boost::split;
using boost::optional;
using boost::dynamic_pointer_cast;
using dcp::Size;
using namespace dcpomatic;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _have_current_subtitle (false)
{
        if (c->video && c->video->use()) {
                video.reset (new VideoDecoder (this, c));
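                /* Work out the offset to add to the content's own timestamps so
                   that frames arrive at sensible, non-negative times.
                */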
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is, it just needs to be black */
                _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
                _black_image->make_black ();
        } else {
                _pts_offset = ContentTime ();
        }

        if (c->audio) {
                audio.reset (new AudioDecoder (this, c->audio, fast));
        }

        if (c->only_text()) {
                /* XXX: this time here should be the time of the first subtitle, not 0 */
                text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
        }

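        /* One entry per stream: the predicted time of the next frame, used for
           audio frames which arrive without a timestamp of their own.
        */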
        _next_time.resize (_format_context->nb_streams);
}

void
FFmpegDecoder::flush ()
{
        /* Get any remaining frames */

        _packet.data = 0;
        _packet.size = 0;

        /* XXX: should we reset _packet.data and size after each *_decode_* call? */

        while (video && decode_video_packet()) {}

        if (audio) {
                decode_audio_packet ();
        }

        /* Make sure all streams are the same length and round up to the next video frame */

        FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video) {
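                /* Pad the video with black frames, from the frame after the last
                   one we emitted up to the rounded-up full length.
                */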
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                Frame const f = full_length.frames_round (vfr);
                Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
                while (v < f) {
                        video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
                        ++v;
                }
        }

        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
                ContentTime a = audio->stream_position(film(), i);
                /* Unfortunately if a is 0 that really means that we don't know the stream position since
                   there has been no data on it since the last seek.  In this case we'll just do nothing
                   here.  I'm not sure if that's the right idea.
                */
                if (a > ContentTime()) {
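                        /* Pad with silence in blocks of up to 0.1s, so that we
                           never allocate one huge buffer of silence in one go.
                        */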
                        while (a < full_length) {
                                ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a);
                                a += to_do;
                        }
                }
        }

        if (audio) {
                audio->flush ();
        }
}

bool
FFmpegDecoder::pass ()
{
#ifdef DCPOMATIC_VARIANT_SWAROOP
        if (_ffmpeg_content->encrypted() && !_ffmpeg_content->kdm()) {
                return true;
        }
#endif

        int r = av_read_frame (_format_context, &_packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                flush ();
                return true;
        }

        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

        if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_subtitle_packet ();
        } else {
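                /* Not video or subtitle: hand it to decode_audio_packet(), which
                   will ignore it if it is not from one of our audio streams.
                */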
                decode_audio_packet ();
        }

        av_packet_unref (&_packet);
        return false;
}

/** Deinterleave the audio data in _frame and convert it to float.
 *  Only the first data buffer is used for non-planar formats; planar
 *  formats have one buffer per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );

        /* Deinterleave and convert to float */

        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
        int const channels = stream->channels();
        int const frames = total_samples / channels;
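        /* For example, 4096 bytes of interleaved 16-bit stereo is 2048 samples,
           which is 1024 frames on each of the 2 channels.
        */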
        shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
        float** data = audio->data();

        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
                uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 samples are unsigned with a bias of 128, so convert to [-1, 1) */
                        data[channel][sample] = (float(*p++) - 128) / 128;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (_frame->data);
                DCPOMATIC_ASSERT (_frame->channels <= channels);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
                for (int i = _frame->channels; i < channels; ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
        }

        return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return stream->stream (_format_context)->codec->sample_fmt;
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                DCPOMATIC_ASSERT (_ffmpeg_content->audio);
                shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        ContentTime u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
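        /* av_seek_frame wants a target expressed in the units of the stream's
           time base, so convert our time in seconds accordingly.
        */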
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        {
                /* Force re-creation of filter graphs to reset them and hence to make sure
                   they don't have any pre-seek frames knocking about.
                */
                boost::mutex::scoped_lock lm (_filter_graphs_mutex);
                _filter_graphs.clear ();
        }

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (i->stream(_format_context)->codec);
        }

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;
}

void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

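        /* Take a shallow copy of the packet so that we can advance its data
           pointer as we consume it, without disturbing _packet itself.
        */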
        AVPacket copy_packet = _packet;
        int const stream_index = copy_packet.stream_index;

        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
        while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }

        while (copy_packet.size > 0) {

                int frame_finished;
                int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
                           some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
                           if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
                           even in the face of such an error, so I think we should too.

                           Returning from the method here caused mantis #352.
                        */
                        LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

                        /* Fudge decode_result so that we come out of the while loop when
                           we've processed this data.
                        */
                        decode_result = copy_packet.size;
                }

                if (frame_finished) {
                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

                        ContentTime ct;
                        if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
                                /* In some streams we see not every frame coming through with a timestamp; for those
                                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                                   particularly noticeable with TrueHD streams (see #1111).
                                */
                                ct = *_next_time[stream_index];
                        } else {
                                ct = ContentTime::from_seconds (
                                        av_frame_get_best_effort_timestamp (_frame) *
                                        av_q2d ((*stream)->stream (_format_context)->time_base))
                                        + _pts_offset;
                        }

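                        /* Predict where the next frame on this stream should
                           start, for use if it arrives without a timestamp.
                        */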
                        _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());

                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                                data->move (data->frames() - remove, remove, 0);
                                data->set_frames (data->frames() - remove);
                                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
                        }

                        if (ct < ContentTime()) {
                                LOG_WARNING (
                                        "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
                                        to_string(ct),
                                        data->frames(),
                                        copy_packet.stream_index,
                                        copy_packet.pts,
                                        av_frame_get_best_effort_timestamp(_frame),
                                        av_q2d((*stream)->stream(_format_context)->time_base),
                                        to_string(_pts_offset)
                                        );
                        }

                        /* Give this data provided there is some, and its time is sane */
                        if (ct >= ContentTime() && data->frames() > 0) {
                                audio->emit (film(), *stream, data, ct);
                        }
                }

                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

bool
FFmpegDecoder::decode_video_packet ()
{
        DCPOMATIC_ASSERT (_video_stream);

        int frame_finished;
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }

        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

        shared_ptr<VideoFilterGraph> graph;

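        /* Find a cached filter graph that can handle this frame's size and
           pixel format; setting up a graph is expensive, so we keep them
           around for re-use.
        */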
        list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
                graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
                graph = *i;
        }

        list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = i->first;

                if (i->second != AV_NOPTS_VALUE) {
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();

                        video->emit (
                                film(),
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }

        return true;
}

void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
                } else {
                        only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects <= 0) {
                /* Nothing new in this subtitle; free it before returning */
                avsubtitle_free (&sub);
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTime const from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                /* We already know when this subtitle should stop */
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                /* We don't yet know when this subtitle stops; a later packet will tell us */
                _current_subtitle_to = optional<ContentTime>();
        }
        _have_current_subtitle = true;

        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                AVSubtitleRect const * rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        decode_bitmap_subtitle (rect, from);
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        decode_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}

void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1]
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1]
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        uint8_t* out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint8_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        RGBA const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
        DCPOMATIC_ASSERT (target_width);
        DCPOMATIC_ASSERT (target_height);
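        /* Express the subtitle rectangle as proportions of the video frame,
           since that is how positions are carried around from here on.
        */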
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / target_width,
                static_cast<double> (rect->y) / target_height,
                static_cast<double> (rect->w) / target_width,
                static_cast<double> (rect->h) / target_height
                );

        only_text()->emit_bitmap_start (from, image, scaled_rect);
}

void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */
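        /* That assumed format looks something like
           Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world
           with the subtitle text being everything after the ninth comma.
        */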

        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
                base,
                text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height
                );

        BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
                only_text()->emit_plain_start (from, i);
        }
}