Subtitle rearrangements.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
3
4     This program is free software; you can redistribute it and/or modify
5     it under the terms of the GNU General Public License as published by
6     the Free Software Foundation; either version 2 of the License, or
7     (at your option) any later version.
8
9     This program is distributed in the hope that it will be useful,
10     but WITHOUT ANY WARRANTY; without even the implied warranty of
11     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12     GNU General Public License for more details.
13
14     You should have received a copy of the GNU General Public License
15     along with this program; if not, write to the Free Software
16     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17
18 */
19
20 #include "player.h"
21 #include "film.h"
22 #include "ffmpeg_decoder.h"
23 #include "audio_buffers.h"
24 #include "ffmpeg_content.h"
25 #include "image_decoder.h"
26 #include "image_content.h"
27 #include "sndfile_decoder.h"
28 #include "sndfile_content.h"
29 #include "subtitle_content.h"
30 #include "text_subtitle_decoder.h"
31 #include "text_subtitle_content.h"
32 #include "dcp_content.h"
33 #include "job.h"
34 #include "image.h"
35 #include "raw_image_proxy.h"
36 #include "ratio.h"
37 #include "log.h"
38 #include "render_subtitles.h"
39 #include "config.h"
40 #include "content_video.h"
41 #include "player_video.h"
42 #include "frame_rate_change.h"
44 #include "dcp_decoder.h"
45 #include "dcp_subtitle_content.h"
46 #include "dcp_subtitle_decoder.h"
47 #include "audio_processor.h"
48 #include "playlist.h"
49 #include "referenced_reel_asset.h"
50 #include <dcp/reel.h>
51 #include <dcp/reel_sound_asset.h>
52 #include <dcp/reel_subtitle_asset.h>
53 #include <dcp/reel_picture_asset.h>
54 #include <boost/foreach.hpp>
55 #include <stdint.h>
56 #include <algorithm>
57 #include <iostream>
58
59 #include "i18n.h"
60
61 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
62
63 using std::list;
64 using std::cout;
65 using std::min;
66 using std::max;
68 using std::vector;
69 using std::pair;
70 using std::map;
71 using std::make_pair;
72 using std::copy;
73 using boost::shared_ptr;
74 using boost::weak_ptr;
75 using boost::dynamic_pointer_cast;
76 using boost::optional;
77 using boost::scoped_ptr;
78
79 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
80         : _film (film)
81         , _playlist (playlist)
82         , _have_valid_pieces (false)
83         , _ignore_video (false)
84         , _ignore_audio (false)
85         , _always_burn_subtitles (false)
86         , _fast (false)
87         , _play_referenced (false)
88 {
89         _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
90         _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
91         _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
92         set_video_container_size (_film->frame_size ());
93
94         film_changed (Film::AUDIO_PROCESSOR);
95 }
96
97 void
98 Player::setup_pieces ()
99 {
100         list<shared_ptr<Piece> > old_pieces = _pieces;
101         _pieces.clear ();
102
103         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
104
105                 if (!i->paths_valid ()) {
106                         continue;
107                 }
108
109                 shared_ptr<Decoder> decoder;
110                 optional<FrameRateChange> frc;
111
112                 /* FFmpeg */
113                 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (i);
114                 if (fc) {
115                         decoder.reset (new FFmpegDecoder (fc, _film->log(), _fast));
116                         frc = FrameRateChange (fc->video->video_frame_rate(), _film->video_frame_rate());
117                 }
118
119                 shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
120                 if (dc) {
121                         decoder.reset (new DCPDecoder (dc, _film->log(), _fast));
122                         frc = FrameRateChange (dc->video->video_frame_rate(), _film->video_frame_rate());
123                 }
124
125                 /* ImageContent */
126                 shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (i);
127                 if (ic) {
128                         /* See if we can re-use an old ImageDecoder */
129                         for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
130                                 shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
131                                 if (imd && imd->content() == ic) {
132                                         decoder = imd;
133                                 }
134                         }
135
136                         if (!decoder) {
137                                 decoder.reset (new ImageDecoder (ic, _film->log()));
138                         }
139
140                         frc = FrameRateChange (ic->video->video_frame_rate(), _film->video_frame_rate());
141                 }
142
143                 /* SndfileContent */
144                 shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (i);
145                 if (sc) {
146                         decoder.reset (new SndfileDecoder (sc, _fast));
147
148                         /* Work out a FrameRateChange from the video content which overlaps this audio content the most */
149                         DCPTime best_overlap_t;
150                         shared_ptr<Content> best_overlap;
151                         BOOST_FOREACH (shared_ptr<Content> j, _playlist->content ()) {
152                                 if (!j->video) {
153                                         continue;
154                                 }
155
156                                 DCPTime const overlap = min (j->end(), i->end()) - max (j->position(), i->position());
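                                /* Worked example (illustrative figures): if this audio content runs from 0s to 60s
                                   and j is video running from 10s to 70s, the overlap is min(70, 60) - max(10, 0) = 50s,
                                   so j becomes best_overlap unless some other video overlaps by more. */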
157                                 if (overlap > best_overlap_t) {
158                                         best_overlap = j;
159                                         best_overlap_t = overlap;
160                                 }
161                         }
162
163                         if (best_overlap) {
164                                 frc = FrameRateChange (best_overlap->video->video_frame_rate(), _film->video_frame_rate ());
165                         } else {
166                                 /* No video overlap; e.g. if the DCP is just audio */
167                                 frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
168                         }
169                 }
170
171                 /* It's questionable whether subtitle content should have a video frame rate; perhaps
172                    it should be assumed that any subtitle content has been prepared at the same rate
173                    as simultaneous video content (like we do with audio).
174                 */
175
176                 /* TextSubtitleContent */
177                 shared_ptr<const TextSubtitleContent> rc = dynamic_pointer_cast<const TextSubtitleContent> (i);
178                 if (rc) {
179                         decoder.reset (new TextSubtitleDecoder (rc));
180                         frc = FrameRateChange (rc->subtitle_video_frame_rate(), _film->video_frame_rate());
181                 }
182
183                 /* DCPSubtitleContent */
184                 shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (i);
185                 if (dsc) {
186                         decoder.reset (new DCPSubtitleDecoder (dsc));
187                         frc = FrameRateChange (dsc->subtitle_video_frame_rate(), _film->video_frame_rate());
188                 }
189
190                 shared_ptr<VideoDecoder> vd = dynamic_pointer_cast<VideoDecoder> (decoder);
191                 if (vd && _ignore_video) {
192                         vd->set_ignore_video ();
193                 }
194
195                 shared_ptr<AudioDecoder> ad = dynamic_pointer_cast<AudioDecoder> (decoder);
196                 if (ad && _ignore_audio) {
197                         ad->set_ignore_audio ();
198                 }
199
200                 _pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
201         }
202
203         _have_valid_pieces = true;
204 }
205
206 void
207 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
208 {
209         shared_ptr<Content> c = w.lock ();
210         if (!c) {
211                 return;
212         }
213
214         if (
215                 property == ContentProperty::POSITION ||
216                 property == ContentProperty::LENGTH ||
217                 property == ContentProperty::TRIM_START ||
218                 property == ContentProperty::TRIM_END ||
219                 property == ContentProperty::PATH ||
220                 property == VideoContentProperty::VIDEO_FRAME_TYPE ||
221                 property == DCPContentProperty::CAN_BE_PLAYED ||
222                 property == SubtitleContentProperty::SUBTITLE_COLOUR ||
223                 property == SubtitleContentProperty::SUBTITLE_OUTLINE ||
224                 property == SubtitleContentProperty::SUBTITLE_OUTLINE_COLOUR ||
225                 property == FFmpegContentProperty::SUBTITLE_STREAM
226                 ) {
227
228                 _have_valid_pieces = false;
229                 Changed (frequent);
230
231         } else if (
232                 property == SubtitleContentProperty::USE_SUBTITLES ||
233                 property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
234                 property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
235                 property == SubtitleContentProperty::SUBTITLE_X_SCALE ||
236                 property == SubtitleContentProperty::SUBTITLE_Y_SCALE ||
237                 property == SubtitleContentProperty::FONTS ||
238                 property == VideoContentProperty::VIDEO_CROP ||
239                 property == VideoContentProperty::VIDEO_SCALE ||
240                 property == VideoContentProperty::VIDEO_FRAME_RATE ||
241                 property == VideoContentProperty::VIDEO_FADE_IN ||
242                 property == VideoContentProperty::VIDEO_FADE_OUT ||
243                 property == VideoContentProperty::COLOUR_CONVERSION
244                 ) {
245
246                 Changed (frequent);
247         }
248 }
249
250 void
251 Player::set_video_container_size (dcp::Size s)
252 {
253         _video_container_size = s;
254
255         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
256         _black_image->make_black ();
257 }
258
259 void
260 Player::playlist_changed ()
261 {
262         _have_valid_pieces = false;
263         Changed (false);
264 }
265
266 void
267 Player::film_changed (Film::Property p)
268 {
269         /* Here we should notice Film properties that affect our output, and
270            alert listeners that our output would now be different from how it was
271            last time we were run.
272         */
273
274         if (p == Film::CONTAINER) {
275                 Changed (false);
276         } else if (p == Film::VIDEO_FRAME_RATE) {
277                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
278                    so we need new pieces here.
279                 */
280                 _have_valid_pieces = false;
281                 Changed (false);
282         } else if (p == Film::AUDIO_PROCESSOR) {
283                 if (_film->audio_processor ()) {
284                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
285                 }
286         }
287 }
288
289 list<PositionImage>
290 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
291 {
292         list<PositionImage> all;
293
294         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
295                 if (!i->image) {
296                         continue;
297                 }
298
299                 /* We will scale the subtitle up to fit _video_container_size */
300                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
301
302                 /* Then we need a corrective translation, consisting of two parts:
303                  *
304                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
305                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
306                  *
307                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
308                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
309                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
310                  *
311                  * Part 2 is handled in get_subtitles(), which applies a corrective translation to the rectangle before we get here, so only part 1 appears in the expressions below.
312                  */
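                /* Worked example (illustrative figures): with a container of 1998x1080 and a subtitle
                   rectangle of (x 0.1, y 0.8, width 0.5, height 0.1), scaled_size is 999x108 and the
                   PositionImage is placed at (lrint(199.8), lrint(864)) = (200, 864). */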
313
314                 all.push_back (
315                         PositionImage (
316                                 i->image->scale (
317                                         scaled_size,
318                                         dcp::YUV_TO_RGB_REC601,
319                                         i->image->pixel_format (),
320                                         true,
321                                         _fast
322                                         ),
323                                 Position<int> (
324                                         lrint (_video_container_size.width * i->rectangle.x),
325                                         lrint (_video_container_size.height * i->rectangle.y)
326                                         )
327                                 )
328                         );
329         }
330
331         return all;
332 }
333
334 shared_ptr<PlayerVideo>
335 Player::black_player_video_frame (DCPTime time) const
336 {
337         return shared_ptr<PlayerVideo> (
338                 new PlayerVideo (
339                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
340                         time,
341                         Crop (),
342                         optional<double> (),
343                         _video_container_size,
344                         _video_container_size,
345                         EYES_BOTH,
346                         PART_WHOLE,
347                         PresetColourConversion::all().front().conversion
348                 )
349         );
350 }
351
352 /** @return All PlayerVideos at the given time.  There may be none, for example if the content
353  *  at `time' is a DCP which we are passing through (i.e. referring to by reference),
354  *  or two if we have 3D.
355  */
356 list<shared_ptr<PlayerVideo> >
357 Player::get_video (DCPTime time, bool accurate)
358 {
359         if (!_have_valid_pieces) {
360                 setup_pieces ();
361         }
362
363         /* Find subtitles for possible burn-in */
364
365         PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true, accurate);
366
367         list<PositionImage> sub_images;
368
369         /* Image subtitles */
370         list<PositionImage> c = transform_image_subtitles (ps.image);
371         copy (c.begin(), c.end(), back_inserter (sub_images));
372
373         /* Text subtitles (rendered to an image) */
374         if (!ps.text.empty ()) {
375                 list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
376                 copy (s.begin (), s.end (), back_inserter (sub_images));
377         }
378
379         optional<PositionImage> subtitles;
380         if (!sub_images.empty ()) {
381                 subtitles = merge (sub_images);
382         }
383
384         /* Find pieces containing video which is happening now */
385
386         list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
387                 time,
388                 time + DCPTime::from_frames (1, _film->video_frame_rate ())
389                 );
390
391         list<shared_ptr<PlayerVideo> > pvf;
392
393         if (ov.empty ()) {
394                 /* No video content at this time */
395                 pvf.push_back (black_player_video_frame (time));
396         } else {
397                 /* Some video content at this time */
398                 shared_ptr<Piece> last = *(ov.rbegin ());
399                 VideoFrameType const last_type = last->content->video->video_frame_type ();
400
401                 /* Get video from appropriate piece(s) */
402                 BOOST_FOREACH (shared_ptr<Piece> piece, ov) {
403
404                         shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
405                         DCPOMATIC_ASSERT (decoder);
406
407                         shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (piece->content);
408                         if (dcp_content && dcp_content->reference_video () && !_play_referenced) {
409                                 continue;
410                         }
411
412                         bool const use =
413                                 /* always use the last video */
414                                 piece == last ||
415                                 /* with a corresponding L/R eye if appropriate */
416                                 (last_type == VIDEO_FRAME_TYPE_3D_LEFT && piece->content->video->video_frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
417                                 (last_type == VIDEO_FRAME_TYPE_3D_RIGHT && piece->content->video->video_frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);
418
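                        /* For example, if the last overlapping piece carries the right eye of a 3D pair, a piece
                           whose video is the corresponding left eye is also used, so both eyes are returned for
                           this time. */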
419                         if (use) {
420                                 /* We want to use this piece */
421                                 list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
422                                 if (content_video.empty ()) {
423                                         pvf.push_back (black_player_video_frame (time));
424                                 } else {
425                                         dcp::Size image_size = piece->content->video->scale().size (
426                                                 piece->content->video, _video_container_size, _film->frame_size ()
427                                                 );
428
429                                         for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
430                                                 pvf.push_back (
431                                                         shared_ptr<PlayerVideo> (
432                                                                 new PlayerVideo (
433                                                                         i->image,
434                                                                         content_video_to_dcp (piece, i->frame),
435                                                                         piece->content->video->crop (),
436                                                                         piece->content->video->fade (i->frame),
437                                                                         image_size,
438                                                                         _video_container_size,
439                                                                         i->eyes,
440                                                                         i->part,
441                                                                         piece->content->video->colour_conversion ()
442                                                                         )
443                                                                 )
444                                                         );
445                                         }
446                                 }
447                         } else {
448                                 /* Discard unused video */
449                                 decoder->get_video (dcp_to_content_video (piece, time), accurate);
450                         }
451                 }
452         }
453
454         if (subtitles) {
455                 BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
456                         p->set_subtitle (subtitles.get ());
457                 }
458         }
459
460         return pvf;
461 }
462
463 /** @return Audio data or 0 if the only audio data here is referenced DCP data */
464 shared_ptr<AudioBuffers>
465 Player::get_audio (DCPTime time, DCPTime length, bool accurate)
466 {
467         if (!_have_valid_pieces) {
468                 setup_pieces ();
469         }
470
471         Frame const length_frames = length.frames_round (_film->audio_frame_rate ());
472
473         shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
474         audio->make_silent ();
475
476         list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
477         if (ov.empty ()) {
478                 return audio;
479         }
480
481         bool all_referenced = true;
482         BOOST_FOREACH (shared_ptr<Piece> i, ov) {
483                 shared_ptr<AudioContent> audio_content = dynamic_pointer_cast<AudioContent> (i->content);
484                 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (i->content);
485                 if (audio_content && (!dcp_content || !dcp_content->reference_audio ())) {
486                         /* There is audio content which is not from a DCP or not set to be referenced */
487                         all_referenced = false;
488                 }
489         }
490
491         if (all_referenced && !_play_referenced) {
492                 return shared_ptr<AudioBuffers> ();
493         }
494
495         BOOST_FOREACH (shared_ptr<Piece> i, ov) {
496
497                 shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (i->content);
498                 DCPOMATIC_ASSERT (content);
499                 shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> (i->decoder);
500                 DCPOMATIC_ASSERT (decoder);
501
502                 /* The time that we should request from the content */
503                 DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
504                 Frame request_frames = length_frames;
505                 DCPTime offset;
506                 if (request < DCPTime ()) {
507                         /* We went off the start of the content, so we will need to offset
508                            the stuff we get back.
509                         */
510                         offset = -request;
511                         request_frames += request.frames_round (_film->audio_frame_rate ());
512                         if (request_frames < 0) {
513                                 request_frames = 0;
514                         }
515                         request = DCPTime ();
516                 }
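                /* Worked example (illustrative figures, assuming a 48kHz audio frame rate): with an
                   audio_delay of 200ms and time = 0, request starts at -200ms, so we clamp request to 0,
                   shorten request_frames by 0.2 * 48000 = 9600 frames and set offset to 200ms so that
                   the audio we do get back is written 9600 frames into the output buffer below. */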
517
518                 Frame const content_frame = dcp_to_resampled_audio (i, request);
519
520                 BOOST_FOREACH (AudioStreamPtr j, content->audio_streams ()) {
521
522                         if (j->channels() == 0) {
523                                 /* Some content (e.g. DCPs) can have streams with no channels */
524                                 continue;
525                         }
526
527                         /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
528                         ContentAudio all = decoder->get_audio (j, content_frame, request_frames, accurate);
529
530                         /* Gain */
531                         if (content->audio_gain() != 0) {
532                                 shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
533                                 gain->apply_gain (content->audio_gain ());
534                                 all.audio = gain;
535                         }
536
537                         /* Remap channels */
538                         shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
539                         dcp_mapped->make_silent ();
540                         AudioMapping map = j->mapping ();
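                        /* For example (illustrative mapping): a stereo stream mapped with gain 1 from its
                           channel 0 to DCP channel 0 and channel 1 to DCP channel 1 is accumulated into the
                           first two channels of dcp_mapped; DCP channels with no mapped input stay silent,
                           since dcp_mapped was made silent above. */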
541                         for (int i = 0; i < map.input_channels(); ++i) {
542                                 for (int j = 0; j < _film->audio_channels(); ++j) {
543                                         if (map.get (i, j) > 0) {
544                                                 dcp_mapped->accumulate_channel (
545                                                         all.audio.get(),
546                                                         i,
547                                                         j,
548                                                         map.get (i, j)
549                                                         );
550                                         }
551                                 }
552                         }
553
554                         if (_audio_processor) {
555                                 dcp_mapped = _audio_processor->run (dcp_mapped, _film->audio_channels ());
556                         }
557
558                         all.audio = dcp_mapped;
559
560                         audio->accumulate_frames (
561                                 all.audio.get(),
562                                 content_frame - all.frame,
563                                 offset.frames_round (_film->audio_frame_rate()),
564                                 min (Frame (all.audio->frames()), request_frames)
565                                 );
566                 }
567         }
568
569         return audio;
570 }
571
572 Frame
573 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
574 {
575         shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
576         DCPTime s = t - piece->content->position ();
577         s = min (piece->content->length_after_trim(), s);
578         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
579
580         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
581            then convert that ContentTime to frames at the content's rate.  However this fails for
582            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
583            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
584
585            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
586         */
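        /* Worked example (illustrative figures, assuming frc.factor() gives the per-frame repeat count):
           24fps content in a 48fps DCP has each frame repeated, so frc.factor() is 2; a DCPTime of one
           second is floor(1 * 48) = 48 DCP frames, i.e. 48 / 2 = 24 content frames. */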
587         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
588 }
589
590 DCPTime
591 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
592 {
593         shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
594         /* See comment in dcp_to_content_video */
595         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
596         return max (DCPTime (), d + piece->content->position ());
597 }
598
599 Frame
600 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
601 {
602         DCPTime s = t - piece->content->position ();
603         s = min (piece->content->length_after_trim(), s);
604         /* See notes in dcp_to_content_video */
605         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
606 }
607
608 ContentTime
609 Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
610 {
611         DCPTime s = t - piece->content->position ();
612         s = min (piece->content->length_after_trim(), s);
613         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
614 }
615
616 DCPTime
617 Player::content_subtitle_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
618 {
619         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
620 }
621
622 /** @param burnt true to return only subtitles to be burnt, false to return only
623  *  subtitles that should not be burnt.  This parameter will be ignored if
624  *  _always_burn_subtitles is true; in this case, all subtitles will be returned.
625  */
626 PlayerSubtitles
627 Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt, bool accurate)
628 {
629         list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);
630
631         PlayerSubtitles ps (time, length);
632
633         for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
634                 if (!(*j)->content->subtitle->use_subtitles () || (!_always_burn_subtitles && (burnt != (*j)->content->subtitle->burn_subtitles ()))) {
635                         continue;
636                 }
637
638                 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> ((*j)->content);
639                 if (dcp_content && dcp_content->reference_subtitle () && !_play_referenced) {
640                         continue;
641                 }
642
643                 shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
644                 ContentTime const from = dcp_to_content_subtitle (*j, time);
645                 /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
646                 ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
647
648                 list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting, accurate);
649                 for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
650
651                         /* Apply content's subtitle offsets */
652                         i->sub.rectangle.x += (*j)->content->subtitle->subtitle_x_offset ();
653                         i->sub.rectangle.y += (*j)->content->subtitle->subtitle_y_offset ();
654
655                         /* Apply content's subtitle scale */
656                         i->sub.rectangle.width *= (*j)->content->subtitle->subtitle_x_scale ();
657                         i->sub.rectangle.height *= (*j)->content->subtitle->subtitle_y_scale ();
658
659                         /* Apply a corrective translation to keep the subtitle centred after that scale */
660                         i->sub.rectangle.x -= i->sub.rectangle.width * ((*j)->content->subtitle->subtitle_x_scale() - 1);
661                         i->sub.rectangle.y -= i->sub.rectangle.height * ((*j)->content->subtitle->subtitle_y_scale() - 1);
662
663                         ps.image.push_back (i->sub);
664                 }
665
666                 list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting, accurate);
667                 BOOST_FOREACH (ContentTextSubtitle& ts, text) {
668                         BOOST_FOREACH (dcp::SubtitleString s, ts.subs) {
669                                 s.set_h_position (s.h_position() + (*j)->content->subtitle->subtitle_x_offset ());
670                                 s.set_v_position (s.v_position() + (*j)->content->subtitle->subtitle_y_offset ());
671                                 float const xs = (*j)->content->subtitle->subtitle_x_scale();
672                                 float const ys = (*j)->content->subtitle->subtitle_y_scale();
673                                 float size = s.size();
674
675                                 /* Adjust size to express the common part of the scaling;
676                                    the size is multiplied by the larger of xs and ys, e.g. if xs = ys = 0.5 the size is halved.
677                                 */
678                                 if (xs > 1e-5 && ys > 1e-5) {
679                                         size *= 1 / min (1 / xs, 1 / ys);
680                                 }
681                                 s.set_size (size);
682
683                                 /* Then express aspect ratio changes */
684                                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
685                                         s.set_aspect_adjust (xs / ys);
686                                 }
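                                /* Worked example (illustrative figures): with subtitle_x_scale 0.8 and
                                   subtitle_y_scale 0.6 the size is multiplied by 1 / min(1/0.8, 1/0.6) = 0.8
                                   and aspect_adjust is set to 0.8 / 0.6 ~= 1.33 to express the remaining
                                   horizontal/vertical difference. */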
687                                 s.set_in (dcp::Time(content_subtitle_to_dcp (*j, ts.period().from).seconds(), 1000));
688                                 s.set_out (dcp::Time(content_subtitle_to_dcp (*j, ts.period().to).seconds(), 1000));
689                                 ps.text.push_back (s);
690                                 ps.add_fonts ((*j)->content->subtitle->fonts ());
691                         }
692                 }
693         }
694
695         return ps;
696 }
697
698 list<shared_ptr<Font> >
699 Player::get_subtitle_fonts ()
700 {
701         if (!_have_valid_pieces) {
702                 setup_pieces ();
703         }
704
705         list<shared_ptr<Font> > fonts;
706         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
707                 shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (p->content);
708                 if (sc) {
709                         /* XXX: things may go wrong if there are duplicate font IDs
710                            with different font files.
711                         */
712                         list<shared_ptr<Font> > f = sc->fonts ();
713                         copy (f.begin(), f.end(), back_inserter (fonts));
714                 }
715         }
716
717         return fonts;
718 }
719
720 /** Set this player never to produce any video data */
721 void
722 Player::set_ignore_video ()
723 {
724         _ignore_video = true;
725 }
726
727 /** Set this player never to produce any audio data */
728 void
729 Player::set_ignore_audio ()
730 {
731         _ignore_audio = true;
732 }
733
734 /** Set whether or not this player should always burn text subtitles into the image,
735  *  regardless of the content settings.
736  *  @param burn true to always burn subtitles, false to obey content settings.
737  */
738 void
739 Player::set_always_burn_subtitles (bool burn)
740 {
741         _always_burn_subtitles = burn;
742 }
743
744 void
745 Player::set_fast ()
746 {
747         _fast = true;
748         _have_valid_pieces = false;
749 }
750
751 void
752 Player::set_play_referenced ()
753 {
754         _play_referenced = true;
755         _have_valid_pieces = false;
756 }
757
758 list<ReferencedReelAsset>
759 Player::get_reel_assets ()
760 {
761         list<ReferencedReelAsset> a;
762
763         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
764                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
765                 if (!j) {
766                         continue;
767                 }
768
769                 scoped_ptr<DCPDecoder> decoder;
770                 try {
771                         decoder.reset (new DCPDecoder (j, _film->log(), false));
772                 } catch (...) {
773                         return a;
774                 }
775
776                 int64_t offset = 0;
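                /* offset accumulates reel lengths in video frames; for example (illustrative figures), with
                   two reels whose main picture is 1440 frames each at 24fps, the second reel's assets are
                   placed 60 seconds after the first reel's `from'. */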
777                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
778                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
779                         if (j->reference_video ()) {
780                                 a.push_back (
781                                         ReferencedReelAsset (
782                                                 k->main_picture (),
783                                                 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_picture()->duration(), _film->video_frame_rate()))
784                                                 )
785                                         );
786                         }
787
788                         if (j->reference_audio ()) {
789                                 a.push_back (
790                                         ReferencedReelAsset (
791                                                 k->main_sound (),
792                                                 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_sound()->duration(), _film->video_frame_rate()))
793                                                 )
794                                         );
795                         }
796
797                         if (j->reference_subtitle ()) {
798                                 DCPOMATIC_ASSERT (k->main_subtitle ());
799                                 a.push_back (
800                                         ReferencedReelAsset (
801                                                 k->main_subtitle (),
802                                                 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_subtitle()->duration(), _film->video_frame_rate()))
803                                                 )
804                                         );
805                         }
806
807                         /* Assume that main picture duration is the length of the reel */
808                         offset += k->main_picture()->duration ();
809                 }
810         }
811
812         return a;
813 }