Purge rint() and use llrint and friends.
[dcpomatic.git] src/lib/player.cc
/*
    Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "dcp_content.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "render_subtitles.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "dcp_decoder.h"
#include "dcp_subtitle_content.h"
#include "dcp_subtitle_decoder.h"
#include "audio_processor.h"
#include "playlist.h"
#include <boost/foreach.hpp>
#include <stdint.h>
#include <algorithm>

#include "i18n.h"

#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using std::make_pair;
using std::copy;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
        : _film (film)
        , _playlist (playlist)
        , _have_valid_pieces (false)
        , _ignore_video (false)
        , _always_burn_subtitles (false)
{
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
        set_video_container_size (_film->frame_size ());

        film_changed (Film::AUDIO_PROCESSOR);
}

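/** Construct a Piece (content, decoder and frame rate change) for each playable item
 *  of content in the playlist, re-using an existing ImageDecoder where one is available.
 */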
void
Player::setup_pieces ()
{
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

                if (!i->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder;
                optional<FrameRateChange> frc;

                /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
                DCPTime best_overlap_t;
                shared_ptr<VideoContent> best_overlap;
                BOOST_FOREACH (shared_ptr<Content> j, _playlist->content ()) {
                        shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (j);
                        if (!vc) {
                                continue;
                        }

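                        /* Length of the period for which both vc and i are present in the DCP;
                           zero or negative means that they do not overlap at all.
                        */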
                        DCPTime const overlap = min (vc->end(), i->end()) - max (vc->position(), i->position());
                        if (overlap > best_overlap_t) {
                                best_overlap = vc;
                                best_overlap_t = overlap;
                        }
                }

                optional<FrameRateChange> best_overlap_frc;
                if (best_overlap) {
                        best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
                } else {
                        /* No video overlap; e.g. if the DCP is just audio */
                        best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
                }

                /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (i);
                if (fc) {
                        decoder.reset (new FFmpegDecoder (fc, _film->log()));
                        frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }

                shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
                if (dc) {
                        decoder.reset (new DCPDecoder (dc));
                        frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate());
                }

                /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (i);
                if (ic) {
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
                                        decoder = imd;
                                }
                        }

                        if (!decoder) {
                                decoder.reset (new ImageDecoder (ic));
                        }

                        frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }

                /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (i);
                if (sc) {
                        decoder.reset (new SndfileDecoder (sc));
                        frc = best_overlap_frc;
                }

                /* SubRipContent */
                shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (i);
                if (rc) {
                        decoder.reset (new SubRipDecoder (rc));
                        frc = best_overlap_frc;
                }

                /* DCPSubtitleContent */
                shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (i);
                if (dsc) {
                        decoder.reset (new DCPSubtitleDecoder (dsc));
                        frc = best_overlap_frc;
                }

                shared_ptr<VideoDecoder> vd = dynamic_pointer_cast<VideoDecoder> (decoder);
                if (vd && _ignore_video) {
                        vd->set_ignore_video ();
                }

                _pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
        }

        _have_valid_pieces = true;
}

void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        if (
                property == ContentProperty::POSITION ||
                property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START ||
                property == ContentProperty::TRIM_END ||
                property == ContentProperty::PATH ||
                property == VideoContentProperty::VIDEO_FRAME_TYPE ||
                property == DCPContentProperty::CAN_BE_PLAYED
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::USE_SUBTITLES ||
                property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_X_SCALE ||
                property == SubtitleContentProperty::SUBTITLE_Y_SCALE ||
                property == SubtitleContentProperty::FONTS ||
                property == VideoContentProperty::VIDEO_CROP ||
                property == VideoContentProperty::VIDEO_SCALE ||
                property == VideoContentProperty::VIDEO_FRAME_RATE ||
                property == VideoContentProperty::VIDEO_FADE_IN ||
                property == VideoContentProperty::VIDEO_FADE_OUT ||
                property == VideoContentProperty::COLOUR_CONVERSION
                ) {

                Changed (frequent);
        }
}

void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

        _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
        _black_image->make_black ();
}

void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        } else if (p == Film::AUDIO_PROCESSOR) {
                if (_film->audio_processor ()) {
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
        }
}

list<PositionImage>
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
{
        list<PositionImage> all;

        for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
                if (!i->image) {
                        continue;
                }

                /* We will scale the subtitle up to fit _video_container_size */
                dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);

                /* Then we need a corrective translation, consisting of two parts:
                 *
                 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
                 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
                 *
                 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
                 *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
                 *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
                 *
                 * Combining these two translations gives these expressions.
                 */

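                /* Illustrative example (numbers are not from any real content): with a
                   1998x1080 container and a rectangle of (x 0.1, y 0.8, width 0.5, height 0.1),
                   the subtitle is scaled to 999x108 and positioned at
                   (lrint (1998 * 0.1), lrint (1080 * 0.8)) = (200, 864).
                */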
                all.push_back (
                        PositionImage (
                                i->image->scale (
                                        scaled_size,
                                        dcp::YUV_TO_RGB_REC601,
                                        i->image->pixel_format (),
                                        true
                                        ),
                                Position<int> (
                                        lrint (_video_container_size.width * i->rectangle.x),
                                        lrint (_video_container_size.height * i->rectangle.y)
                                        )
                                )
                        );
        }

        return all;
}

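/** Make a black frame, the size of the video container, for use when there is no video at the given time */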
shared_ptr<PlayerVideo>
Player::black_player_video_frame (DCPTime time) const
{
        return shared_ptr<PlayerVideo> (
                new PlayerVideo (
                        shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
                        time,
                        Crop (),
                        optional<double> (),
                        _video_container_size,
                        _video_container_size,
                        EYES_BOTH,
                        PART_WHOLE,
                        PresetColourConversion::all().front().conversion
                )
        );
}

/** @return All PlayerVideos at the given time (there may be two frames for 3D) */
list<shared_ptr<PlayerVideo> >
Player::get_video (DCPTime time, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        /* Find subtitles for possible burn-in */

        PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true);

        list<PositionImage> sub_images;

        /* Image subtitles */
        list<PositionImage> c = transform_image_subtitles (ps.image);
        copy (c.begin(), c.end(), back_inserter (sub_images));

        /* Text subtitles (rendered to an image) */
        if (!ps.text.empty ()) {
                list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
                copy (s.begin (), s.end (), back_inserter (sub_images));
        }

        optional<PositionImage> subtitles;
        if (!sub_images.empty ()) {
                subtitles = merge (sub_images);
        }

        /* Find pieces containing video which is happening now */

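        /* i.e. those whose video overlaps the period of exactly one frame starting at `time';
           DCPTime::delta() is subtracted so that the first instant of the next frame is excluded.
        */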
        list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
                time,
                time + DCPTime::from_frames (1, _film->video_frame_rate ()) - DCPTime::delta()
                );

        list<shared_ptr<PlayerVideo> > pvf;

        if (ov.empty ()) {
                /* No video content at this time */
                pvf.push_back (black_player_video_frame (time));
        } else {
                /* Some video content at this time */
                shared_ptr<Piece> last = *(ov.rbegin ());
                VideoFrameType const last_type = dynamic_pointer_cast<VideoContent> (last->content)->video_frame_type ();

                /* Get video from appropriate piece(s) */
                BOOST_FOREACH (shared_ptr<Piece> piece, ov) {

                        shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
                        DCPOMATIC_ASSERT (decoder);
                        shared_ptr<VideoContent> video_content = dynamic_pointer_cast<VideoContent> (piece->content);
                        DCPOMATIC_ASSERT (video_content);

                        bool const use =
                                /* always use the last video */
                                piece == last ||
                                /* with a corresponding L/R eye if appropriate */
                                (last_type == VIDEO_FRAME_TYPE_3D_LEFT && video_content->video_frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
                                (last_type == VIDEO_FRAME_TYPE_3D_RIGHT && video_content->video_frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);

                        if (use) {
                                /* We want to use this piece */
                                list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
                                if (content_video.empty ()) {
                                        pvf.push_back (black_player_video_frame (time));
                                } else {
                                        dcp::Size image_size = video_content->scale().size (video_content, _video_container_size, _film->frame_size ());

                                        for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
                                                pvf.push_back (
                                                        shared_ptr<PlayerVideo> (
                                                                new PlayerVideo (
                                                                        i->image,
                                                                        content_video_to_dcp (piece, i->frame),
                                                                        video_content->crop (),
                                                                        video_content->fade (i->frame),
                                                                        image_size,
                                                                        _video_container_size,
                                                                        i->eyes,
                                                                        i->part,
                                                                        video_content->colour_conversion ()
                                                                        )
                                                                )
                                                        );
                                        }
                                }
                        } else {
                                /* Discard unused video */
                                decoder->get_video (dcp_to_content_video (piece, time), accurate);
                        }
                }
        }

        if (subtitles) {
                BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
                        p->set_subtitle (subtitles.get ());
                }
        }

        return pvf;
}

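/** @return Audio for the period [time, time + length), remapped to the film's channel
 *  count; any part of the period not covered by audio content is left silent.
 */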
shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        Frame const length_frames = length.frames_round (_film->audio_frame_rate ());

        shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
        audio->make_silent ();

        list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
        if (ov.empty ()) {
                return audio;
        }

        for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {

                shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
                DCPOMATIC_ASSERT (content);
                shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
                DCPOMATIC_ASSERT (decoder);

                /* The time that we should request from the content */
                DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
                Frame request_frames = length_frames;
                DCPTime offset;
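                /* Illustrative example (numbers are hypothetical): a content audio_delay of
                   500ms with time = 0 gives request = -0.5s; the branch below then clamps the
                   request to zero, shortens it by 0.5s worth of frames and sets offset = 0.5s,
                   so that the audio we get back lands 0.5s later in the output buffer.
                */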
                if (request < DCPTime ()) {
                        /* We went off the start of the content, so we will need to offset
                           the stuff we get back.
                        */
                        offset = -request;
                        request_frames += request.frames_round (_film->audio_frame_rate ());
                        if (request_frames < 0) {
                                request_frames = 0;
                        }
                        request = DCPTime ();
                }

                Frame const content_frame = dcp_to_resampled_audio (*i, request);

                BOOST_FOREACH (AudioStreamPtr j, content->audio_streams ()) {

                        if (j->channels() == 0) {
                                /* Some content (e.g. DCPs) can have streams with no channels */
                                continue;
                        }

                        /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
                        ContentAudio all = decoder->get_audio (j, content_frame, request_frames, accurate);

                        /* Gain */
                        if (content->audio_gain() != 0) {
                                shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
                                gain->apply_gain (content->audio_gain ());
                                all.audio = gain;
                        }

                        /* Remap channels */
                        shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
                        dcp_mapped->make_silent ();
                        AudioMapping map = j->mapping ();
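                        /* Accumulate each input channel into each output channel, weighted by the
                           mapping gain; a gain of 0 means the input is not routed to that output.
                        */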
                        for (int i = 0; i < map.input_channels(); ++i) {
                                for (int j = 0; j < _film->audio_channels(); ++j) {
                                        if (map.get (i, j) > 0) {
                                                dcp_mapped->accumulate_channel (
                                                        all.audio.get(),
                                                        i,
                                                        j,
                                                        map.get (i, j)
                                                        );
                                        }
                                }
                        }

                        if (_audio_processor) {
                                dcp_mapped = _audio_processor->run (dcp_mapped);
                        }

                        all.audio = dcp_mapped;

                        audio->accumulate_frames (
                                all.audio.get(),
                                content_frame - all.frame,
                                offset.frames_round (_film->audio_frame_rate()),
                                min (Frame (all.audio->frames()), request_frames)
                                );
                }
        }

        return audio;
}

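/** Convert a DCP time to a frame index within a piece of video content.
 *  Illustrative example (hypothetical numbers, assuming no frame rate change): content
 *  positioned at 10s into the DCP with 1s trimmed from its start, at 25fps, gives
 *  dcp_to_content_video (piece, 12s) = (12 - 10 + 1) * 25 = frame 75.
 */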
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start ()).frames_round (vc->video_frame_rate ());
}

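/** The inverse of dcp_to_content_video: convert a content video frame index to a DCP time */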
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
        ContentTime const c = ContentTime::from_frames (f, vc->video_frame_rate ()) - piece->content->trim_start ();
        return max (DCPTime (), DCPTime (c, piece->frc) + piece->content->position ());
}

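/** Convert a DCP time to a frame index within a piece's audio, at the film's audio sampling rate */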
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}

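/** Convert a DCP time to a time within a piece of subtitle content */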
ContentTime
Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}

/** @param burnt true to return only subtitles to be burnt, false to return only
 *  subtitles that should not be burnt.  This parameter will be ignored if
 *  _always_burn_subtitles is true; in this case, all subtitles will be returned.
 */
PlayerSubtitles
Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt)
{
        list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);

        PlayerSubtitles ps (time, length);

        for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
                shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*j)->content);
                if (!subtitle_content->use_subtitles () || (!_always_burn_subtitles && (burnt != subtitle_content->burn_subtitles ()))) {
                        continue;
                }

                shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
                ContentTime const from = dcp_to_content_subtitle (*j, time);
                /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
                ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());

                list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting);
                for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {

                        /* Apply content's subtitle offsets */
                        i->sub.rectangle.x += subtitle_content->subtitle_x_offset ();
                        i->sub.rectangle.y += subtitle_content->subtitle_y_offset ();

                        /* Apply content's subtitle scale */
                        i->sub.rectangle.width *= subtitle_content->subtitle_x_scale ();
                        i->sub.rectangle.height *= subtitle_content->subtitle_y_scale ();

                        /* Apply a corrective translation to keep the subtitle centred after that scale */
                        i->sub.rectangle.x -= i->sub.rectangle.width * (subtitle_content->subtitle_x_scale() - 1);
                        i->sub.rectangle.y -= i->sub.rectangle.height * (subtitle_content->subtitle_y_scale() - 1);

                        ps.image.push_back (i->sub);
                }

                list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting);
                BOOST_FOREACH (ContentTextSubtitle& ts, text) {
                        BOOST_FOREACH (dcp::SubtitleString& s, ts.subs) {
                                s.set_h_position (s.h_position() + subtitle_content->subtitle_x_offset ());
                                s.set_v_position (s.v_position() + subtitle_content->subtitle_y_offset ());
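                                /* Approximate a non-uniform scale by scaling the font size by the
                                   average of the x and y scales, then correcting the aspect ratio
                                   if the two scales differ appreciably.
                                */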
                                float const xs = subtitle_content->subtitle_x_scale();
                                float const ys = subtitle_content->subtitle_y_scale();
                                float const average = s.size() * (xs + ys) / 2;
                                s.set_size (average);
                                if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
                                        s.set_aspect_adjust (xs / ys);
                                }
                                ps.text.push_back (s);
                                ps.add_fonts (subtitle_content->fonts ());
                        }
                }
        }

        return ps;
}

list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        list<shared_ptr<Font> > fonts;
        BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
                shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (p->content);
                if (sc) {
                        /* XXX: things may go wrong if there are duplicate font IDs
                           with different font files.
                        */
                        list<shared_ptr<Font> > f = sc->fonts ();
                        copy (f.begin(), f.end(), back_inserter (fonts));
                }
        }

        return fonts;
}

/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        _ignore_video = true;
}

/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
        _always_burn_subtitles = burn;
}