src/lib/player.cc
/*
    Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include <stdint.h>
#include <algorithm>
#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "playlist.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "scaler.h"
#include "render_subtitles.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "dcp_subtitle_content.h"
#include "dcp_subtitle_decoder.h"

#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using std::make_pair;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
        : _film (f)
        , _playlist (p)
        , _have_valid_pieces (false)
        , _approximate_size (false)
{
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        set_video_container_size (_film->frame_size ());
}

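/** Rebuild _pieces from the playlist: create a decoder and a FrameRateChange for each
 *  piece of usable content.  Old ImageDecoders are re-used where possible.
 */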
void
Player::setup_pieces ()
{
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        ContentList content = _playlist->content ();

        for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {

                if (!(*i)->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder;
                optional<FrameRateChange> frc;

                /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
                DCPTime best_overlap_t;
                shared_ptr<VideoContent> best_overlap;
                for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
                        shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
                        if (!vc) {
                                continue;
                        }

                        DCPTime const overlap = max (vc->position(), (*i)->position()) - min (vc->end(), (*i)->end());
                        if (overlap > best_overlap_t) {
                                best_overlap = vc;
                                best_overlap_t = overlap;
                        }
                }

                optional<FrameRateChange> best_overlap_frc;
                if (best_overlap) {
                        best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
                } else {
                        /* No video overlap; e.g. if the DCP is just audio */
                        best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
                }

                /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
                if (fc) {
                        decoder.reset (new FFmpegDecoder (fc, _film->log()));
                        frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }

                /* DCPContent */
                shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (*i);
                if (dc) {
                        decoder.reset (new DCPDecoder (dc, _film->log ()));
                        frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate());
                }

                /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
                if (ic) {
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
                                        decoder = imd;
                                }
                        }

                        if (!decoder) {
                                decoder.reset (new ImageDecoder (ic));
                        }

                        frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }

                /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
                if (sc) {
                        decoder.reset (new SndfileDecoder (sc));
                        frc = best_overlap_frc;
                }

                /* SubRipContent */
                shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
                if (rc) {
                        decoder.reset (new SubRipDecoder (rc));
                        frc = best_overlap_frc;
                }

                /* DCPSubtitleContent */
                shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (*i);
                if (dsc) {
                        decoder.reset (new DCPSubtitleDecoder (dsc));
                        frc = best_overlap_frc;
                }

                _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
        }

        _have_valid_pieces = true;
}

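/** Handler for a change to a property of a piece of content in the playlist.
 *  Changes which affect timing or decoder setup invalidate _pieces; others just
 *  cause the Changed signal to be emitted.
 */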
void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        if (
                property == ContentProperty::POSITION ||
                property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START ||
                property == ContentProperty::TRIM_END ||
                property == ContentProperty::PATH ||
                property == VideoContentProperty::VIDEO_FRAME_TYPE
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::SUBTITLE_USE ||
                property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_SCALE ||
                property == VideoContentProperty::VIDEO_CROP ||
                property == VideoContentProperty::VIDEO_SCALE ||
                property == VideoContentProperty::VIDEO_FRAME_RATE
                ) {

                Changed (frequent);
        }
}

/** Handler for a change to the playlist; our pieces are no longer valid and our output will be different */
void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}

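/** Set the size of the container into which video will be placed, and rebuild the
 *  black frame that we use when there is no video at a given time.
 */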
void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

        _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
        _black_image->make_black ();
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::SCALER || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        }
}

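/** Convert a list of image subtitles (whose rectangles are expressed as fractions of the video)
 *  into PositionImages scaled and positioned for _video_container_size.
 */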
list<PositionImage>
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
{
        list<PositionImage> all;

        for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
                if (!i->image) {
                        continue;
                }

                /* We will scale the subtitle up to fit _video_container_size */
                dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);

                /* Then we need a corrective translation, consisting of two parts:
                 *
                 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
                 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
                 *
                 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
                 *     (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
                 *     (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
                 *
                 * Combining these two translations gives these expressions.
                 */

                all.push_back (
                        PositionImage (
                                i->image->scale (
                                        scaled_size,
                                        Scaler::from_id ("bicubic"),
                                        i->image->pixel_format (),
                                        true
                                        ),
                                Position<int> (
                                        rint (_video_container_size.width * i->rectangle.x),
                                        rint (_video_container_size.height * i->rectangle.y)
                                        )
                                )
                        );
        }

        return all;
}

void
Player::set_approximate_size ()
{
        _approximate_size = true;
}

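/** @return a black PlayerVideo filling the whole container, used when there is no video at the given time */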
shared_ptr<PlayerVideo>
Player::black_player_video_frame (DCPTime time) const
{
        return shared_ptr<PlayerVideo> (
                new PlayerVideo (
                        shared_ptr<const ImageProxy> (new RawImageProxy (_black_image, _film->log ())),
                        time,
                        Crop (),
                        _video_container_size,
                        _video_container_size,
                        Scaler::from_id ("bicubic"),
                        EYES_BOTH,
                        PART_WHOLE,
                        Config::instance()->colour_conversions().front().conversion
                )
        );
}

/** @return All PlayerVideos at the given time (there may be two frames for 3D) */
list<shared_ptr<PlayerVideo> >
Player::get_video (DCPTime time, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
                time,
                time + DCPTime::from_frames (1, _film->video_frame_rate ())
                );

        list<shared_ptr<PlayerVideo> > pvf;

        if (ov.empty ()) {
                /* No video content at this time */
                pvf.push_back (black_player_video_frame (time));
        } else {
                /* Create a PlayerVideo from the content's video at this time */

                shared_ptr<Piece> piece = ov.back ();
                shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
                assert (decoder);
                shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
                assert (content);

                list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
                if (content_video.empty ()) {
                        pvf.push_back (black_player_video_frame (time));
                        return pvf;
                }

                dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size ());
                if (_approximate_size) {
                        image_size.width &= ~3;
                        image_size.height &= ~3;
                }

                for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
                        pvf.push_back (
                                shared_ptr<PlayerVideo> (
                                        new PlayerVideo (
                                                i->image,
                                                content_video_to_dcp (piece, i->frame),
                                                content->crop (),
                                                image_size,
                                                _video_container_size,
                                                _film->scaler(),
                                                i->eyes,
                                                i->part,
                                                content->colour_conversion ()
                                                )
                                        )
                                );
                }
        }

        /* Add subtitles (for possible burn-in) to whatever PlayerVideos we got */

        PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false);

        list<PositionImage> sub_images;

        /* Image subtitles */
        list<PositionImage> c = transform_image_subtitles (ps.image);
        copy (c.begin(), c.end(), back_inserter (sub_images));

        /* Text subtitles (rendered to images) */
        sub_images.push_back (render_subtitles (ps.text, _video_container_size));

        if (!sub_images.empty ()) {
                for (list<shared_ptr<PlayerVideo> >::const_iterator i = pvf.begin(); i != pvf.end(); ++i) {
                        (*i)->set_subtitle (merge (sub_images));
                }
        }

        return pvf;
}

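/** Get audio for a period of DCP time, mixed down to the film's channel count.
 *  Each overlapping piece of audio content is fetched, gain-adjusted, remapped
 *  according to its AudioMapping and accumulated into a single AudioBuffers;
 *  any part of the period with no audio content is left silent.
 */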
shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        AudioFrame const length_frames = length.frames (_film->audio_frame_rate ());

        shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
        audio->make_silent ();

        list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
        if (ov.empty ()) {
                return audio;
        }

        for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {

                shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
                assert (content);
                shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
                assert (decoder);

                if (content->audio_frame_rate() == 0) {
                        /* This AudioContent has no audio (e.g. if it is an FFmpegContent with no
                         * audio stream).
                         */
                        continue;
                }

                /* The time that we should request from the content */
                DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
                DCPTime offset;
                if (request < DCPTime ()) {
                        /* We went off the start of the content, so we will need to offset
                           the stuff we get back.
                        */
                        offset = -request;
                        request = DCPTime ();
                }

                AudioFrame const content_frame = dcp_to_content_audio (*i, request);

                /* Audio from this piece's decoder (which might be more or less than what we asked for) */
                shared_ptr<ContentAudio> all = decoder->get_audio (content_frame, length_frames, accurate);

                /* Gain */
                if (content->audio_gain() != 0) {
                        shared_ptr<AudioBuffers> gain (new AudioBuffers (all->audio));
                        gain->apply_gain (content->audio_gain ());
                        all->audio = gain;
                }

                /* Remap channels */
                shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all->audio->frames()));
                dcp_mapped->make_silent ();
                AudioMapping map = content->audio_mapping ();
                for (int i = 0; i < map.content_channels(); ++i) {
                        for (int j = 0; j < _film->audio_channels(); ++j) {
                                if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
                                        dcp_mapped->accumulate_channel (
                                                all->audio.get(),
                                                i,
                                                j,
                                                map.get (i, static_cast<dcp::Channel> (j))
                                                );
                                }
                        }
                }

                all->audio = dcp_mapped;

                audio->accumulate_frames (
                        all->audio.get(),
                        content_frame - all->frame,
                        offset.frames (_film->audio_frame_rate()),
                        min (AudioFrame (all->audio->frames()), length_frames) - offset.frames (_film->audio_frame_rate ())
                        );
        }

        return audio;
}

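/** @param t A time in the DCP.
 *  @return The corresponding frame index in this piece's video content, allowing for its
 *  position, trim and frame rate change.
 */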
VideoFrame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        /* Convert this to the content frame */
        return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) * piece->frc.factor ();
}

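/** @param f A frame index in this piece's video content.
 *  @return The corresponding DCP time, clamped so that it is not earlier than the start of the DCP.
 */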
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, VideoFrame f) const
{
        DCPTime t = DCPTime::from_frames (f / piece->frc.factor (), _film->video_frame_rate()) - piece->content->trim_start () + piece->content->position ();
        if (t < DCPTime ()) {
                t = DCPTime ();
        }

        return t;
}

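/** @param t A time in the DCP.
 *  @return The corresponding frame index in this piece's audio content, expressed at the film's audio frame rate.
 */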
AudioFrame
Player::dcp_to_content_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        /* Convert this to a frame at the film's audio frame rate */
        return DCPTime (s + piece->content->trim_start()).frames (_film->audio_frame_rate());
}

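/** @param t A time in the DCP.
 *  @return The corresponding time within this piece's subtitle content, allowing for its
 *  position, trim and frame rate change.
 */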
ContentTime
Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        return ContentTime (s + piece->content->trim_start(), piece->frc);
}

void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
        log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat), Log::TYPE_GENERAL);
        log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()), Log::TYPE_GENERAL);
}

PlayerStatistics const &
Player::statistics () const
{
        return _statistics;
}

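/** Gather the subtitles to be shown during a period of DCP time.
 *  Image subtitles have the content's x/y offsets and scale applied, with a corrective
 *  translation to keep each subtitle centred after scaling; text subtitles are collected
 *  as-is for later rendering.
 */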
PlayerSubtitles
Player::get_subtitles (DCPTime time, DCPTime length, bool starting)
{
        list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);

        PlayerSubtitles ps (time, length);

        for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
                shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*j)->content);
                if (!subtitle_content->subtitle_use ()) {
                        continue;
                }

                shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
                ContentTime const from = dcp_to_content_subtitle (*j, time);
                /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
                ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());

                list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting);
                for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {

                        /* Apply content's subtitle offsets */
                        i->sub.rectangle.x += subtitle_content->subtitle_x_offset ();
                        i->sub.rectangle.y += subtitle_content->subtitle_y_offset ();

                        /* Apply content's subtitle scale */
                        i->sub.rectangle.width *= subtitle_content->subtitle_scale ();
                        i->sub.rectangle.height *= subtitle_content->subtitle_scale ();

                        /* Apply a corrective translation to keep the subtitle centred after that scale */
                        i->sub.rectangle.x -= i->sub.rectangle.width * (subtitle_content->subtitle_scale() - 1);
                        i->sub.rectangle.y -= i->sub.rectangle.height * (subtitle_content->subtitle_scale() - 1);

                        ps.image.push_back (i->sub);
                }

                list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting);
                for (list<ContentTextSubtitle>::const_iterator i = text.begin(); i != text.end(); ++i) {
                        copy (i->subs.begin(), i->subs.end(), back_inserter (ps.text));
                }
        }

        return ps;
}