Apply subtitle scale (ish) to text subs; should help with #413.
[dcpomatic.git] / src / lib / player.cc
/*
    Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "dcp_content.h"
#include "playlist.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "scaler.h"
#include "render_subtitles.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "dcp_decoder.h"
#include "dcp_subtitle_content.h"
#include "dcp_subtitle_decoder.h"
#include <boost/foreach.hpp>
#include <stdint.h>
#include <algorithm>
#include <cassert>

#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using std::make_pair;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
        : _film (f)
        , _playlist (p)
        , _have_valid_pieces (false)
        , _approximate_size (false)
{
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        set_video_container_size (_film->frame_size ());
}

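/** Rebuild the list of Pieces (content plus decoder plus frame rate change) from the
 *  playlist, re-using old ImageDecoders where possible.
 */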
void
Player::setup_pieces ()
{
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        ContentList content = _playlist->content ();

        for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {

                if (!(*i)->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder;
                optional<FrameRateChange> frc;

                /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
                DCPTime best_overlap_t;
                shared_ptr<VideoContent> best_overlap;
                for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
                        shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
                        if (!vc) {
                                continue;
                        }

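                        /* Length of the period during which both vc and *i are present (zero or negative if they do not overlap) */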
                        DCPTime const overlap = min (vc->end(), (*i)->end()) - max (vc->position(), (*i)->position());
                        if (overlap > best_overlap_t) {
                                best_overlap = vc;
                                best_overlap_t = overlap;
                        }
                }

                optional<FrameRateChange> best_overlap_frc;
                if (best_overlap) {
                        best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
                } else {
                        /* No video overlap; e.g. if the DCP is just audio */
                        best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
                }

                /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
                if (fc) {
                        decoder.reset (new FFmpegDecoder (fc, _film->log()));
                        frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }

                shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (*i);
                if (dc) {
                        decoder.reset (new DCPDecoder (dc, _film->log ()));
                        frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate());
                }

                /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
                if (ic) {
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
                                        decoder = imd;
                                }
                        }

                        if (!decoder) {
                                decoder.reset (new ImageDecoder (ic));
                        }

                        frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }

                /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
                if (sc) {
                        decoder.reset (new SndfileDecoder (sc));
                        frc = best_overlap_frc;
                }

                /* SubRipContent */
                shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
                if (rc) {
                        decoder.reset (new SubRipDecoder (rc));
                        frc = best_overlap_frc;
                }

                /* DCPSubtitleContent */
                shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (*i);
                if (dsc) {
                        decoder.reset (new DCPSubtitleDecoder (dsc));
                        frc = best_overlap_frc;
                }

                _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
        }

        _have_valid_pieces = true;
}

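/** Called when a piece of content has changed; decide whether the change invalidates
 *  our Pieces or just means that our output would now be different.
 */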
void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        if (
                property == ContentProperty::POSITION ||
                property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START ||
                property == ContentProperty::TRIM_END ||
                property == ContentProperty::PATH ||
                property == VideoContentProperty::VIDEO_FRAME_TYPE ||
                property == DCPContentProperty::CAN_BE_PLAYED
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::USE_SUBTITLES ||
                property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_X_SCALE ||
                property == SubtitleContentProperty::SUBTITLE_Y_SCALE ||
                property == VideoContentProperty::VIDEO_CROP ||
                property == VideoContentProperty::VIDEO_SCALE ||
                property == VideoContentProperty::VIDEO_FRAME_RATE
                ) {

                Changed (frequent);
        }
}

/** Handler for the playlist's Changed signal */
void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}

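/** Set the size of the container that video will be placed into, and rebuild the
 *  cached black frame to match.
 */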
void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

        _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
        _black_image->make_black ();
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::SCALER || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        }
}

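/** Convert a list of image subtitles, whose rectangles are expressed as fractions of
 *  the screen, into images scaled and positioned in pixels within _video_container_size.
 */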
list<PositionImage>
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
{
        list<PositionImage> all;

        for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
                if (!i->image) {
                        continue;
                }

                /* We will scale the subtitle up to fit _video_container_size */
                dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);

                /* Then we need a corrective translation, consisting of two parts:
                 *
                 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
                 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
                 *
                 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
                 *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
                 *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
                 *
                 * The second part is applied to the rectangle in get_subtitles() before we get here,
                 * so only the first part appears in the expressions below.
                 */
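                /* For example (purely illustrative figures): with a 1998x1080 container and a rectangle of
                   (x=0.1, y=0.8, width=0.5, height=0.1), the subtitle image is scaled to 999x108 and placed
                   at (200, 864), i.e. rint (1998 * 0.1) and rint (1080 * 0.8).
                */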

                all.push_back (
                        PositionImage (
                                i->image->scale (
                                        scaled_size,
                                        Scaler::from_id ("bicubic"),
                                        i->image->pixel_format (),
                                        true
                                        ),
                                Position<int> (
                                        rint (_video_container_size.width * i->rectangle.x),
                                        rint (_video_container_size.height * i->rectangle.y)
                                        )
                                )
                        );
        }

        return all;
}

void
Player::set_approximate_size ()
{
        _approximate_size = true;
}

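/** @return A black PlayerVideo filling the video container, used where there is no video content */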
shared_ptr<PlayerVideo>
Player::black_player_video_frame (DCPTime time) const
{
        return shared_ptr<PlayerVideo> (
                new PlayerVideo (
                        shared_ptr<const ImageProxy> (new RawImageProxy (_black_image, _film->log ())),
                        time,
                        Crop (),
                        optional<float> (),
                        _video_container_size,
                        _video_container_size,
                        Scaler::from_id ("bicubic"),
                        EYES_BOTH,
                        PART_WHOLE,
                        Config::instance()->colour_conversions().front().conversion
                )
        );
}

/** @return All PlayerVideos at the given time (there may be two frames for 3D) */
list<shared_ptr<PlayerVideo> >
Player::get_video (DCPTime time, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
                time,
                time + DCPTime::from_frames (1, _film->video_frame_rate ())
                );

        list<shared_ptr<PlayerVideo> > pvf;

        if (ov.empty ()) {
                /* No video content at this time */
                pvf.push_back (black_player_video_frame (time));
        } else {
                /* Create a PlayerVideo from the content's video at this time */

                shared_ptr<Piece> piece = ov.back ();
                shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
                assert (decoder);
                shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
                assert (content);

                list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
                if (content_video.empty ()) {
                        pvf.push_back (black_player_video_frame (time));
                        return pvf;
                }

                dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size (), _approximate_size ? 4 : 1);
                if (_approximate_size) {
                        image_size.width &= ~3;
                        image_size.height &= ~3;
                }

                for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
                        pvf.push_back (
                                shared_ptr<PlayerVideo> (
                                        new PlayerVideo (
                                                i->image,
                                                content_video_to_dcp (piece, i->frame),
                                                content->crop (),
                                                content->fade (i->frame),
                                                image_size,
                                                _video_container_size,
                                                _film->scaler(),
                                                i->eyes,
                                                i->part,
                                                content->colour_conversion ()
                                                )
                                        )
                                );
                }
        }

        /* Add subtitles (for possible burn-in) to whatever PlayerVideos we got */

        PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false);

        list<PositionImage> sub_images;

        /* Image subtitles */
        list<PositionImage> c = transform_image_subtitles (ps.image);
        copy (c.begin(), c.end(), back_inserter (sub_images));

        /* Text subtitles (rendered to images) */
        sub_images.push_back (render_subtitles (ps.text, _video_container_size));

        if (!sub_images.empty ()) {
                for (list<shared_ptr<PlayerVideo> >::const_iterator i = pvf.begin(); i != pvf.end(); ++i) {
                        (*i)->set_subtitle (merge (sub_images));
                }
        }

        return pvf;
}

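/** @return Audio for the given period, mixed from all overlapping content into the film's
 *  channel layout; silence where nothing overlaps.
 */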
shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        AudioFrame const length_frames = length.frames (_film->audio_frame_rate ());

        shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
        audio->make_silent ();

        list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
        if (ov.empty ()) {
                return audio;
        }

        for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {

                shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
                assert (content);
                shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
                assert (decoder);

                if (content->audio_frame_rate() == 0) {
                        /* This AudioContent has no audio (e.g. if it is an FFmpegContent with no
                         * audio stream).
                         */
                        continue;
                }

                /* The time that we should request from the content */
                DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
                DCPTime offset;
                if (request < DCPTime ()) {
                        /* We went off the start of the content, so we will need to offset
                           the stuff we get back.
                        */
                        offset = -request;
                        request = DCPTime ();
                }

                AudioFrame const content_frame = dcp_to_content_audio (*i, request);

                /* Audio from this piece's decoder (which might be more or less than what we asked for) */
                shared_ptr<ContentAudio> all = decoder->get_audio (content_frame, length_frames, accurate);

                /* Gain */
                if (content->audio_gain() != 0) {
                        shared_ptr<AudioBuffers> gain (new AudioBuffers (all->audio));
                        gain->apply_gain (content->audio_gain ());
                        all->audio = gain;
                }

                /* Remap channels */
                shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all->audio->frames()));
                dcp_mapped->make_silent ();
                AudioMapping map = content->audio_mapping ();
                for (int i = 0; i < map.content_channels(); ++i) {
                        for (int j = 0; j < _film->audio_channels(); ++j) {
                                if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
                                        dcp_mapped->accumulate_channel (
                                                all->audio.get(),
                                                i,
                                                j,
                                                map.get (i, static_cast<dcp::Channel> (j))
                                                );
                                }
                        }
                }

                all->audio = dcp_mapped;

                audio->accumulate_frames (
                        all->audio.get(),
                        content_frame - all->frame,
                        offset.frames (_film->audio_frame_rate()),
                        min (AudioFrame (all->audio->frames()), length_frames) - offset.frames (_film->audio_frame_rate ())
                        );
        }

        return audio;
}

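/** @return The frame of this piece's video content that corresponds to the DCP time t,
 *  allowing for the content's position, trim and frame rate change.
 */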
VideoFrame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        /* Convert this to the content frame */
        return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) * piece->frc.factor ();
}

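/** @return The DCP time corresponding to frame f of this piece's video content
 *  (the inverse of dcp_to_content_video), clamped to zero at the earliest.
 */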
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, VideoFrame f) const
{
        DCPTime t = DCPTime::from_frames (f / piece->frc.factor (), _film->video_frame_rate()) - piece->content->trim_start () + piece->content->position ();
        if (t < DCPTime ()) {
                t = DCPTime ();
        }

        return t;
}

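/** @return The audio frame of this piece's content that corresponds to the DCP time t */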
AudioFrame
Player::dcp_to_content_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        /* Convert this to the content frame */
        return DCPTime (s + piece->content->trim_start()).frames (_film->audio_frame_rate());
}

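/** @return The time within this piece's subtitle content that corresponds to the DCP time t */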
ContentTime
Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        return ContentTime (s + piece->content->trim_start(), piece->frc);
}

void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
        log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat), Log::TYPE_GENERAL);
        log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()), Log::TYPE_GENERAL);
}

PlayerStatistics const &
Player::statistics () const
{
        return _statistics;
}

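/** Collect the image and text subtitles that overlap the given period, applying each
 *  content's subtitle offset and scale settings as we go.
 */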
PlayerSubtitles
Player::get_subtitles (DCPTime time, DCPTime length, bool starting)
{
        list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);

        PlayerSubtitles ps (time, length);

        for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
                shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*j)->content);
                if (!subtitle_content->use_subtitles ()) {
                        continue;
                }

                shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
                ContentTime const from = dcp_to_content_subtitle (*j, time);
                /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
                ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());

                list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting);
                for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {

                        /* Apply content's subtitle offsets */
                        i->sub.rectangle.x += subtitle_content->subtitle_x_offset ();
                        i->sub.rectangle.y += subtitle_content->subtitle_y_offset ();

                        /* Apply content's subtitle scale */
                        i->sub.rectangle.width *= subtitle_content->subtitle_x_scale ();
                        i->sub.rectangle.height *= subtitle_content->subtitle_y_scale ();

                        /* Apply a corrective translation to keep the subtitle centred after that scale */
                        i->sub.rectangle.x -= i->sub.rectangle.width * (subtitle_content->subtitle_x_scale() - 1);
                        i->sub.rectangle.y -= i->sub.rectangle.height * (subtitle_content->subtitle_y_scale() - 1);

                        ps.image.push_back (i->sub);
                }

                list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting);
                BOOST_FOREACH (ContentTextSubtitle& ts, text) {
                        BOOST_FOREACH (dcp::SubtitleString& s, ts.subs) {
                                s.set_v_position (s.v_position() + subtitle_content->subtitle_y_offset ());
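                                /* Apply the content's subtitle scale.  A text subtitle has a single size,
                                   so as an approximation ("ish") use the larger of the two scale factors.
                                */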
                                s.set_size (s.size() * max (subtitle_content->subtitle_x_scale(), subtitle_content->subtitle_y_scale()));
                                ps.text.push_back (s);
                        }
                }
        }

        return ps;
}