2 Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include "ffmpeg_decoder.h"
24 #include "ffmpeg_content.h"
25 #include "still_image_decoder.h"
26 #include "still_image_content.h"
27 #include "moving_image_decoder.h"
28 #include "moving_image_content.h"
29 #include "sndfile_decoder.h"
30 #include "sndfile_content.h"
31 #include "subtitle_content.h"
36 #include "resampler.h"
47 using boost::shared_ptr;
48 using boost::weak_ptr;
49 using boost::dynamic_pointer_cast;
51 //#define DEBUG_PLAYER 1
// A Piece pairs a Content item with the Decoder that produces its data,
// plus the positions (Time) up to which video and audio have been emitted.
// NOTE(review): this extract is truncated — the enclosing class/struct header
// and constructor bodies are not visible here.
56 Piece (shared_ptr<Content> c)
// Both emission positions start at the content's position on the timeline.
58 , video_position (c->position ())
59 , audio_position (c->position ())
// Alternative constructor taking an already-created decoder.
62 Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
65 , video_position (c->position ())
66 , audio_position (c->position ())
// The content this piece represents and the decoder that serves it.
69 shared_ptr<Content> content;
70 shared_ptr<Decoder> decoder;
// Debug pretty-printer for a Piece: identifies the concrete content type
// and appends its position/end times.  Used by the DEBUG_PLAYER output.
// NOTE(review): the strings written for each branch are not visible in this
// truncated extract.
76 std::ostream& operator<<(std::ostream& s, Piece const & p)
// Dispatch on the dynamic type of the content.
78 if (dynamic_pointer_cast<FFmpegContent> (p.content)) {
80 } else if (dynamic_pointer_cast<StillImageContent> (p.content)) {
82 } else if (dynamic_pointer_cast<SndfileContent> (p.content)) {
// Common suffix: where the content sits on the playlist timeline.
86 s << " at " << p.content->position() << " until " << p.content->end();
// Construct a Player for a given Film/Playlist pair.  Wires up change
// signals so the piece list is rebuilt when the playlist or content
// changes, and sizes the output to fit the film's container ratio.
92 Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
// Pieces are built lazily on the first pass()/seek().
97 , _have_valid_pieces (false)
// The merger mixes audio from overlapping pieces; it converts between
// Time and audio frames via the Film's helpers.
100 , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
101 , _last_emit_was_black (false)
// Subscribe to playlist/content/film change notifications.
103 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
104 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
105 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
// Initial output size: the film's container ratio fitted inside its full frame.
106 set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
// Switch off video / audio decoding respectively.  Bodies are not visible
// in this truncated extract; presumably they clear _video / _audio — TODO confirm.
110 Player::disable_video ()
116 Player::disable_audio ()
// Emit roughly one unit of output (a video frame and/or some audio).
// NOTE(review): the signature line is not visible in this extract; this
// appears to be the body of Player::pass() — confirm against the full file.
// Lazily (re)build the piece list if something has invalidated it.
124 if (!_have_valid_pieces) {
126 _have_valid_pieces = true;
// Find the piece whose decoder is furthest behind (smallest emitted position).
133 Time earliest_t = TIME_MAX;
134 shared_ptr<Piece> earliest;
140 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
// Finished decoders cannot supply anything further.
141 if ((*i)->decoder->done ()) {
// Consider video positions only when video is enabled, audio likewise.
145 if (_video && dynamic_pointer_cast<VideoDecoder> ((*i)->decoder)) {
146 if ((*i)->video_position < earliest_t) {
147 earliest_t = (*i)->video_position;
153 if (_audio && dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
154 if ((*i)->audio_position < earliest_t) {
155 earliest_t = (*i)->audio_position;
// DEBUG_PLAYER trace: nothing left to pass.
164 cout << "no earliest piece.\n";
// Gap before the next video: fill with black rather than passing a decoder.
173 if (earliest_t > _video_position) {
175 cout << "no video here; emitting black frame (earliest=" << earliest_t << ", video_position=" << _video_position << ").\n";
180 cout << "Pass video " << *earliest << "\n";
182 earliest->decoder->pass ();
// Gap before the next audio: fill with silence up to earliest_t.
187 if (earliest_t > _audio_position) {
189 cout << "no audio here (none until " << earliest_t << "); emitting silence.\n";
191 emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
194 cout << "Pass audio " << *earliest << "\n";
196 earliest->decoder->pass ();
// If that pass finished the decoder, flush any audio held in its resampler
// (create=false: only use a resampler that already exists for this content).
198 if (earliest->decoder->done()) {
199 shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
201 shared_ptr<Resampler> re = resampler (ac, false);
203 shared_ptr<const AudioBuffers> b = re->flush ();
205 process_audio (earliest, b, ac->audio_length ());
// Audio can safely be pulled from the merger up to the minimum position
// reached by any audio decoder; beyond that more data may still arrive.
214 Time audio_done_up_to = TIME_MAX;
215 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
216 if (dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
217 audio_done_up_to = min (audio_done_up_to, (*i)->audio_position);
// Emit the merged audio and advance our audio position accordingly.
221 TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to);
222 Audio (tb.audio, tb.time);
223 _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
// DEBUG_PLAYER trace of positions after this pass.
227 cout << "\tpost pass _video_position=" << _video_position << " _audio_position=" << _audio_position << "\n";
// Handle a decoded video frame from a piece: apply frame-rate conversion,
// trimming, crop/scale, subtitle overlay and padding, then emit Video.
// `frame` is the frame index within the content; `same` marks a repeat of
// the previous image; `eyes` selects 3D eye.
234 Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
// The piece may already have been destroyed (weak_ptr).
236 shared_ptr<Piece> piece = weak_piece.lock ();
241 shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
// Work out skip/repeat needed to map content rate onto the DCP rate.
244 FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
// When skipping, drop every odd source frame.
245 if (frc.skip && (frame % 2) == 1) {
// Time of this frame relative to the start of the content.
249 Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
250 if (content->trimmed (relative_time)) {
254 /* Convert to RGB first, as FFmpeg doesn't seem to like handling YUV images with odd widths */
255 shared_ptr<Image> work_image = image->scale (image->size (), _film->scaler(), PIX_FMT_RGB24, true);
257 work_image = work_image->crop (content->crop(), true);
// Choose the display ratio: an explicit content ratio, else the cropped size's.
259 float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
260 libdcp::Size image_size = fit_ratio_within (ratio, _video_container_size);
262 work_image = work_image->scale (image_size, _film->scaler(), PIX_FMT_RGB24, true);
// Absolute DCP time of this frame.
264 Time time = content->position() + relative_time - content->trim_start ();
// Burn in the current subtitle if it overlaps this frame.
266 if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
267 work_image->alpha_blend (_out_subtitle.image, _out_subtitle.position);
// Pad (pillarbox/letterbox) the image into the output container, centred.
270 if (image_size != _video_container_size) {
271 assert (image_size.width <= _video_container_size.width);
272 assert (image_size.height <= _video_container_size.height);
273 shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
275 im->copy (work_image, Position<int> ((_video_container_size.width - image_size.width) / 2, (_video_container_size.height - image_size.height) / 2));
279 #ifdef DCPOMATIC_DEBUG
280 _last_video = piece->content;
// Emit the frame; the second Video() emission presumably handles
// frc.repeat (frame doubling) — confirm against the full file.
283 Video (work_image, eyes, content->colour_conversion(), same, time);
284 time += TIME_HZ / _film->video_frame_rate();
287 Video (work_image, eyes, content->colour_conversion(), true, time);
288 time += TIME_HZ / _film->video_frame_rate();
291 _last_emit_was_black = false;
// Record how far this piece's video (and the player's) has progressed.
293 _video_position = piece->video_position = time;
// Handle decoded audio from a piece: apply gain, resample to the output
// rate, honour trims, remap channels to the DCP layout and push the result
// into the audio merger.  `frame` is the content frame index of the first
// sample in `audio`.
297 Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers> audio, AudioContent::Frame frame)
299 shared_ptr<Piece> piece = weak_piece.lock ();
304 shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
// Apply per-content gain on a copy of the buffers.
308 if (content->audio_gain() != 0) {
309 shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
310 gain->apply_gain (content->audio_gain ());
// Resample if the content's native rate differs from the output rate
// (create=true: make a resampler for this content if none exists yet).
315 if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
316 shared_ptr<Resampler> r = resampler (content, true);
317 pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
// Position of this block relative to the content start; drop it if trimmed.
322 Time const relative_time = _film->audio_frames_to_time (frame);
324 if (content->trimmed (relative_time)) {
// Absolute DCP time, including the content's configured audio delay.
328 Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time;
// Remap content channels into the film's DCP channel layout, accumulating
// (so several content channels may feed one DCP channel).
331 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
332 dcp_mapped->make_silent ();
333 list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
334 for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
335 if (i->first < audio->channels() && i->second < dcp_mapped->channels()) {
336 dcp_mapped->accumulate_channel (audio.get(), i->first, i->second);
342 /* We must cut off anything that comes before the start of all time */
// `time` is negative here; `frames` is the count of samples before t=0.
344 int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
345 if (frames >= audio->frames ()) {
349 shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->channels(), audio->frames() - frames));
350 trimmed->copy_from (audio.get(), audio->frames() - frames, frames, 0);
// Queue for mixing and advance this piece's audio position.
356 _audio_merger.push (audio, time);
357 piece->audio_position += _film->audio_frames_to_time (audio->frames ());
// NOTE(review): the signature is not visible in this extract; this appears
// to be Player::flush() — drain remaining merged audio and bring the video
// and audio positions level by emitting black frames / silence.
363 TimedAudioBuffers<Time> tb = _audio_merger.flush ();
365 Audio (tb.audio, tb.time);
366 _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
// Pad whichever stream is behind so both end at the same time.
369 while (_video_position < _audio_position) {
373 while (_audio_position < _video_position) {
374 emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
379 /** Seek so that the next pass() will yield (approximately) the requested frame.
380 * Pass accurate = true to try harder to get close to the request.
381 * @return true on error
384 Player::seek (Time t, bool accurate)
// Rebuild pieces first if needed; nothing to seek in an empty playlist.
386 if (!_have_valid_pieces) {
388 _have_valid_pieces = true;
391 if (_pieces.empty ()) {
// Seek every video piece to the content-local frame corresponding to t.
395 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
396 shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
// Offset into this content, clamped to [0, length_after_trim].
401 Time s = t - vc->position ();
402 s = max (static_cast<Time> (0), s);
403 s = min (vc->length_after_trim(), s);
405 (*i)->video_position = (*i)->audio_position = vc->position() + s;
407 FrameRateConversion frc (vc->video_frame_rate(), _film->video_frame_rate());
408 /* Here we are converting from time (in the DCP) to a frame number in the content.
409 Hence we need to use the DCP's frame rate and the double/skip correction, not
412 VideoContent::Frame f = (s + vc->trim_start ()) * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
413 dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (f, accurate);
// Player-level positions jump straight to the requested time.
416 _video_position = _audio_position = t;
418 /* XXX: don't seek audio because we don't need to... */
// Rebuild the list of Pieces from the playlist: create a decoder of the
// right type for each content item and connect its output signals to the
// process_* handlers.  Old pieces are kept around so decoders can be reused.
422 Player::setup_pieces ()
424 list<shared_ptr<Piece> > old_pieces = _pieces;
// Work through the playlist content in timeline order.
428 ContentList content = _playlist->content ();
429 sort (content.begin(), content.end(), ContentSorter ());
431 for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
433 shared_ptr<Piece> piece (new Piece (*i));
435 /* XXX: into content? */
// FFmpeg content: video + audio + subtitles.
437 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
439 shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
441 fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
442 fd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
443 fd->Subtitle.connect (bind (&Player::process_subtitle, this, piece, _1, _2, _3, _4));
// Still images: video only.
448 shared_ptr<const StillImageContent> ic = dynamic_pointer_cast<const StillImageContent> (*i);
450 shared_ptr<StillImageDecoder> id;
452 /* See if we can re-use an old StillImageDecoder */
453 for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
454 shared_ptr<StillImageDecoder> imd = dynamic_pointer_cast<StillImageDecoder> ((*j)->decoder);
455 if (imd && imd->content() == ic) {
// No reusable decoder found: make a fresh one.
461 id.reset (new StillImageDecoder (_film, ic));
462 id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
// Moving image sequences: video only.
468 shared_ptr<const MovingImageContent> mc = dynamic_pointer_cast<const MovingImageContent> (*i);
470 shared_ptr<MovingImageDecoder> md;
473 md.reset (new MovingImageDecoder (_film, mc));
474 md->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
// Sound files: audio only.
480 shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
482 shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
483 sd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
488 _pieces.push_back (piece);
// DEBUG_PLAYER dump of the assembled piece list.
492 cout << "=== Player setup:\n";
493 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
494 cout << *(i->get()) << "\n";
// React to a change in a content item's properties.  Timing-related changes
// invalidate the piece list; subtitle and video-presentation changes take
// other branches (bodies not visible in this truncated extract).
500 Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
502 shared_ptr<Content> c = w.lock ();
// Changes to where/how long content plays mean pieces must be rebuilt.
508 property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
509 property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END
512 _have_valid_pieces = false;
// Subtitle placement/scale changes — presumably refresh the subtitle; confirm.
515 } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
// Video presentation changes (3D type, crop, ratio).
521 property == VideoContentProperty::VIDEO_FRAME_TYPE || property == VideoContentProperty::VIDEO_CROP ||
522 property == VideoContentProperty::VIDEO_RATIO
// The playlist's structure changed: force setup_pieces() on the next pass/seek.
530 Player::playlist_changed ()
532 _have_valid_pieces = false;
// Set the size of the output video container and rebuild the cached black
// frame (used by emit_black) at that size.
537 Player::set_video_container_size (libdcp::Size s)
539 _video_container_size = s;
540 _black_frame.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
541 _black_frame->make_black ();
// Look up the resampler for a piece of audio content, creating one when
// `create` is true; returns a null pointer when none exists and create is false.
544 shared_ptr<Resampler>
545 Player::resampler (shared_ptr<AudioContent> c, bool create)
547 map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
548 if (i != _resamplers.end ()) {
// Not found and not asked to create: signal "no resampler" to the caller.
553 return shared_ptr<Resampler> ();
// New resampler converting from the content's native rate to its output rate.
556 shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
// Emit one frame of black at the current video position.  Consecutive
// black frames are flagged as `same` via _last_emit_was_black so downstream
// consumers can skip re-encoding identical frames.
562 Player::emit_black ()
564 #ifdef DCPOMATIC_DEBUG
565 _last_video.reset ();
568 Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
569 _video_position += _film->video_frames_to_time (1);
570 _last_emit_was_black = true;
// Emit up to `most` frames of silence at the current audio position,
// capped at half a second per call so large gaps are filled incrementally.
574 Player::emit_silence (OutputAudioFrame most)
580 OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
581 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
582 silence->make_silent ();
583 Audio (silence, _audio_position);
584 _audio_position += _film->audio_frames_to_time (N);
// React to a Film property change that would alter our output.
588 Player::film_changed (Film::Property p)
590 /* Here we should notice Film properties that affect our output, and
591 alert listeners that our output now would be different to how it was
592 last time we were run.
// These three properties all change the rendered picture.
595 if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER) {
// Receive a subtitle image from a decoder.  `rect` is its position/size as
// fractions of the video frame; [from, to] is its display interval relative
// to the content.  The data is stashed in _in_subtitle for update_subtitle().
601 Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
603 _in_subtitle.piece = weak_piece;
604 _in_subtitle.image = image;
605 _in_subtitle.rect = rect;
606 _in_subtitle.from = from;
607 _in_subtitle.to = to;
// Convert the pending incoming subtitle (_in_subtitle) into an output
// subtitle (_out_subtitle): apply the content's offset and scale, compute
// the pixel position/size within _video_container_size, and shift its
// display times onto the DCP timeline.
613 Player::update_subtitle ()
615 shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
// A null image means "clear the current subtitle".
620 if (!_in_subtitle.image) {
621 _out_subtitle.image.reset ();
625 shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
628 dcpomatic::Rect<double> in_rect = _in_subtitle.rect;
629 libdcp::Size scaled_size;
// Apply the user-configured vertical offset (fraction of frame height).
631 in_rect.y += sc->subtitle_offset ();
633 /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
634 scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
635 scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
637 /* Then we need a corrective translation, consisting of two parts:
639 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
640 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
642 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
643 * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
644 * (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
646 * Combining these two translations gives these expressions.
649 _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
650 _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
// Scale the subtitle image, keeping its own pixel format.
652 _out_subtitle.image = _in_subtitle.image->scale (
654 Scaler::from_id ("bicubic"),
655 _in_subtitle.image->pixel_format (),
// Translate the content-relative display interval to DCP time.
658 _out_subtitle.from = _in_subtitle.from + piece->content->position ();
659 _out_subtitle.to = _in_subtitle.to + piece->content->position ();