/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
/** @file  src/lib/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "film.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>

#include "i18n.h"

using std::cout;
using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using std::map;
using boost::shared_ptr;
using boost::is_any_of;
using boost::split;
using boost::optional;
using boost::dynamic_pointer_cast;
using dcp::Size;
using namespace dcpomatic;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _have_current_subtitle (false)
{
        if (c->video && c->video->use()) {
                video.reset (new VideoDecoder (this, c));
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is, it just needs to be black */
                _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
                _black_image->make_black ();
        } else {
                _pts_offset = ContentTime ();
        }

        if (c->audio) {
                audio.reset (new AudioDecoder (this, c->audio, fast));
        }

        if (c->only_text()) {
                /* XXX: this time here should be the time of the first subtitle, not 0 */
                text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
        }
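        /* One entry per stream in the file; used to guess a timestamp for audio
           frames which arrive without one (see decode_audio_packet()).
        */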
        _next_time.resize (_format_context->nb_streams);
}

void
FFmpegDecoder::flush ()
{
        /* Get any remaining frames */

        _packet.data = 0;
        _packet.size = 0;

        /* XXX: should we reset _packet.data and size after each *_decode_* call? */

        while (video && decode_video_packet()) {}

        if (audio) {
                decode_audio_packet ();
        }

        /* Make sure all streams are the same length and round up to the next video frame */

        FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                Frame const f = full_length.frames_round (vfr);
                Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
                while (v < f) {
                        video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
                        ++v;
                }
        }

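        /* Pad each audio stream with silence up to the rounded-up full length,
           emitting it in chunks of at most 0.1s to keep the buffers small.
        */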
        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
                ContentTime a = audio->stream_position(film(), i);
                /* Unfortunately if a is 0 that really means that we don't know the stream position since
                   there has been no data on it since the last seek.  In this case we'll just do nothing
                   here.  I'm not sure if that's the right idea.
                */
                if (a > ContentTime()) {
                        while (a < full_length) {
                                ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a);
                                a += to_do;
                        }
                }
        }

        if (audio) {
                audio->flush ();
        }
}

bool
FFmpegDecoder::pass ()
{
#ifdef DCPOMATIC_VARIANT_SWAROOP
        if (_ffmpeg_content->encrypted() && !_ffmpeg_content->kdm()) {
                return true;
        }
#endif

        int r = av_read_frame (_format_context, &_packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                flush ();
                return true;
        }

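        /* Dispatch the packet to the video, subtitle or audio decoder according
           to the stream it belongs to.
        */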
        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

        if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_subtitle_packet ();
        } else {
                decode_audio_packet ();
        }

        av_packet_unref (&_packet);
        return false;
}

/** Deinterleave audio from the most recently decoded frame into a set of float
 *  buffers, one per channel.  For non-planar (interleaved) sample formats only
 *  the first buffer in _frame->data is used; planar formats have one per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );

        /* XXX: can't we just use _frame->nb_samples directly here? */
        /* XXX: can't we use swr_convert() to do the format conversion? */

        /* Deinterleave and convert to float */

        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
        int const channels = stream->channels();
        int const frames = total_samples / channels;
        shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
        float** data = audio->data();

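        /* For the interleaved formats below, _frame->data[0] holds the samples as
           c0 c1 ... cN, c0 c1 ... cN, ...; for the planar (_P) formats there is
           instead one plane per channel in _frame->data[i].
        */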
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
                uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 samples are unsigned with a mid-point of 128, so shift and scale to [-1, 1) */
                        data[channel][sample] = (float(*p++) - 128.0f) / 128.0f;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (_frame->data);
                DCPOMATIC_ASSERT (_frame->channels <= channels);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
                for (int i = _frame->channels; i < channels; ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
        }

        return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return stream->stream (_format_context)->codec->sample_fmt;
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                DCPOMATIC_ASSERT (_ffmpeg_content->audio);
                shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        ContentTime u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
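        /* av_seek_frame wants a timestamp in the stream's time_base units, so
           convert our seconds by dividing by av_q2d(time_base).  AVSEEK_FLAG_BACKWARD
           asks for a seekable point at or before that timestamp; decoding then
           runs forward from there, which is why the pre-roll above is needed
           for accurate seeks.
        */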
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        {
                /* Force re-creation of filter graphs to reset them and hence to make sure
                   they don't have any pre-seek frames knocking about.
                */
                boost::mutex::scoped_lock lm (_filter_graphs_mutex);
                _filter_graphs.clear ();
        }

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (i->stream(_format_context)->codec);
        }

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;
}

void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

        AVPacket copy_packet = _packet;
        int const stream_index = copy_packet.stream_index;

        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
        while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }

        while (copy_packet.size > 0) {

                int frame_finished;
                int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
                           some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
                           if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
                           even in the face of such an error, so I think we should too.

                           Returning from the method here caused mantis #352.
                        */
                        LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

                        /* Fudge decode_result so that we come out of the while loop when
                           we've processed this data.
                        */
                        decode_result = copy_packet.size;
                }

                if (frame_finished) {
                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

                        ContentTime ct;
                        if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
                                /* In some streams we see not every frame coming through with a timestamp; for those
                                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                                   particularly noticeable with TrueHD streams (see #1111).
                                */
                                ct = *_next_time[stream_index];
                        } else {
                                ct = ContentTime::from_seconds (
                                        av_frame_get_best_effort_timestamp (_frame) *
                                        av_q2d ((*stream)->stream (_format_context)->time_base))
                                        + _pts_offset;
                        }

                        _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());

                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                                data->move (data->frames() - remove, remove, 0);
                                data->set_frames (data->frames() - remove);
                                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
                        }

                        if (ct < ContentTime()) {
                                LOG_WARNING (
                                        "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
                                        to_string(ct),
                                        data->frames(),
                                        copy_packet.stream_index,
                                        copy_packet.pts,
                                        av_frame_get_best_effort_timestamp(_frame),
                                        av_q2d((*stream)->stream(_format_context)->time_base),
                                        to_string(_pts_offset)
                                        );
                        }

                        /* Give this data provided there is some, and its time is sane */
                        if (ct >= ContentTime() && data->frames() > 0) {
                                audio->emit (film(), *stream, data, ct);
                        }
                }

                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

bool
FFmpegDecoder::decode_video_packet ()
{
        DCPOMATIC_ASSERT (_video_stream);

        int frame_finished;
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }

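        /* Look for an existing filter graph that can process this frame's size and
           pixel format, and make a new one if there is none; both can change
           part-way through some content, which I believe is why a list of graphs
           is kept here rather than a single one.
        */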
        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

        shared_ptr<VideoFilterGraph> graph;

        list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
                graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
                graph = *i;
        }

        list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = i->first;

                if (i->second != AV_NOPTS_VALUE) {
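                        /* The graph gives us a PTS in the video stream's time_base
                           units; convert it to seconds, apply the global PTS offset,
                           then round to a frame index at the content's frame rate.
                        */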
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();

                        video->emit (
                                film(),
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }

        return true;
}

void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
                } else {
                        only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects <= 0) {
                /* Nothing new in this subtitle */
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTime from;
        from = sub_period.from + _pts_offset;
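        /* If this subtitle has a known `to' time we emit the stop ourselves at the
           bottom of this method; if not, note that we have a subtitle in flight so
           that the next subtitle packet can stop it (a seek just forgets it).
        */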
        if (sub_period.to) {
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                _current_subtitle_to = optional<ContentTime>();
                _have_current_subtitle = true;
        }

        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                AVSubtitleRect const * rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        decode_bitmap_subtitle (rect, from);
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        decode_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}

void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1].
           (first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        uint8_t* out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint8_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        RGBA const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
        DCPOMATIC_ASSERT (target_width);
        DCPOMATIC_ASSERT (target_height);
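        /* Express the subtitle rectangle as proportions of the video size so that
           it can later be scaled to whatever size the image ends up at.
        */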
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / target_width,
                static_cast<double> (rect->y) / target_height,
                static_cast<double> (rect->w) / target_width,
                static_cast<double> (rect->h) / target_height
                );

        only_text()->emit_bitmap_start (from, image, scaled_rect);
}

void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */

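        /* Everything after the ninth comma is the subtitle text, so commas within
           the text itself survive.  A (hypothetical) example of the sort of line
           this expects:

           Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello, world

           where `Hello, world' is the part we keep.
        */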
        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
                base,
                text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height
                );

        BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
                only_text()->emit_plain_start (from, i);
        }
}