#include "playlist.h"
#include "job.h"
#include "image.h"
+#include "image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "scaler.h"
#include "render_subtitles.h"
-#include "dcp_video.h"
#include "config.h"
#include "content_video.h"
+#include "player_video_frame.h"
+#include "frame_rate_change.h"
+
+#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);
using std::list;
using std::cout;
}
}
+/** @param already_resampled true if this data has already been through the chain up to the resampler */
void
Player::playlist_changed ()
{
_approximate_size = true;
}
-shared_ptr<DCPVideo>
-Player::black_dcp_video (DCPTime time) const
+shared_ptr<PlayerVideoFrame>
+Player::black_player_video_frame () const
{
- return shared_ptr<DCPVideo> (
- new DCPVideo (
- _black_image,
- EYES_BOTH,
+ return shared_ptr<PlayerVideoFrame> (
+ new PlayerVideoFrame (
+ shared_ptr<const ImageProxy> (new RawImageProxy (_black_image, _film->log ())),
Crop (),
_video_container_size,
_video_container_size,
Scaler::from_id ("bicubic"),
- Config::instance()->colour_conversions().front().conversion,
- time
+ EYES_BOTH,
+ PART_WHOLE,
+ Config::instance()->colour_conversions().front().conversion
)
);
}
/* NOTE(review): unresolved unified-diff fragment.  '-' lines are the old DCPVideo-based
   code and '+' lines the new PlayerVideoFrame-based replacement; the hunk headers and
   some context are missing (nothing visible declares 'i', 'sub_images', 'subtitle_decoder'
   or 'subtitle_content', so a loop over 'subs' has been dropped between the constructor
   call and the subtitle handling).  Not compilable as-is.  The intended post-patch
   function appears to build a PlayerVideoFrame from the content's video, optionally
   burn image/text subtitles into it, and return it — TODO: re-apply the original patch
   or restore the missing context before editing this region further. */
-shared_ptr<DCPVideo>
-Player::content_to_dcp (
+shared_ptr<PlayerVideoFrame>
+Player::content_to_player_video_frame (
shared_ptr<VideoContent> content,
ContentVideo content_video,
list<shared_ptr<Piece> > subs,
DCPTime time,
dcp::Size image_size) const
{
-	shared_ptr<DCPVideo> dcp_video (
-		new DCPVideo (
+	shared_ptr<PlayerVideoFrame> pvf (
+		new PlayerVideoFrame (
content_video.image,
-			content_video.eyes,
content->crop (),
image_size,
_video_container_size,
_film->scaler(),
-			content->colour_conversion (),
-			time
+			content_video.eyes,
+			content_video.part,
+			content->colour_conversion ()
)
);
ContentTime const from = dcp_to_content_subtitle (*i, time);
ContentTime const to = from + ContentTime::from_frames (1, content->video_frame_rate ());
/* NOTE(review): the '+' lines below switch the subtitle getters from a (from, to) pair
   to a single ContentTimePeriod argument — part of the same refactor. */
-	list<shared_ptr<ContentImageSubtitle> > image_subtitles = subtitle_decoder->get_image_subtitles (from, to);
+	list<shared_ptr<ContentImageSubtitle> > image_subtitles = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to));
if (!image_subtitles.empty ()) {
list<PositionImage> im = process_content_image_subtitles (
subtitle_content,
}
if (_burn_subtitles) {
-	list<shared_ptr<ContentTextSubtitle> > text_subtitles = subtitle_decoder->get_text_subtitles (from, to);
+	list<shared_ptr<ContentTextSubtitle> > text_subtitles = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to));
if (!text_subtitles.empty ()) {
list<PositionImage> im = process_content_text_subtitles (text_subtitles);
copy (im.begin(), im.end(), back_inserter (sub_images));
}
if (!sub_images.empty ()) {
-		dcp_video->set_subtitle (merge (sub_images));
+		pvf->set_subtitle (merge (sub_images));
}
-	return dcp_video;
+	return pvf;
}
/* NOTE(review): unresolved unified-diff fragment of Player::get_video — hunk headers and
   context missing (the body of "if (!_have_valid_pieces)", and whatever defines 'ov',
   'content', 'subs' and the loop variable 'i', have been dropped).  Not compilable as-is.
   The visible intent of the patch: rename DCPVideo → PlayerVideoFrame throughout, and have
   black_player_video_frame() take no time argument.  TODO: restore the missing context
   before editing this region further. */
-/** @return All DCPVideo at the given time (there may be two frames for 3D) */
-list<shared_ptr<DCPVideo> >
+/** @return All PlayerVideoFrames at the given time (there may be two frames for 3D) */
+list<shared_ptr<PlayerVideoFrame> >
Player::get_video (DCPTime time, bool accurate)
{
if (!_have_valid_pieces) {
time + DCPTime::from_frames (1, _film->video_frame_rate ())
);
-	list<shared_ptr<DCPVideo> > dcp_video;
+	list<shared_ptr<PlayerVideoFrame> > pvf;
if (ov.empty ()) {
/* No video content at this time */
-		dcp_video.push_back (black_dcp_video (time));
-		return dcp_video;
+		pvf.push_back (black_player_video_frame ());
+		return pvf;
}
-	/* Create a DCPVideo from the content's video at this time */
+	/* Create a PlayerVideoFrame from the content's video at this time */
shared_ptr<Piece> piece = ov.back ();
shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
if (content_video.empty ()) {
-		dcp_video.push_back (black_dcp_video (time));
-		return dcp_video;
+		pvf.push_back (black_player_video_frame ());
+		return pvf;
}
dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size ());
time + DCPTime::from_frames (1, _film->video_frame_rate ())
);
-		dcp_video.push_back (content_to_dcp (content, *i, subs, time, image_size));
+		pvf.push_back (content_to_player_video_frame (content, *i, subs, time, image_size));
}
-	return dcp_video;
+	return pvf;
}
/* NOTE(review): fragment only — the function's name and almost all of its body are
   outside / cut from this hunk (the return type suggests an audio getter).  The patch
   removes "return audio;" with no visible replacement; presumably the return is handled
   in missing context — TODO confirm against the full file before touching this. */
shared_ptr<AudioBuffers>
min (AudioFrame (all->audio->frames()), length_frames) - offset.frames (_film->audio_frame_rate ())
);
}
-
-	return audio;
}
/* NOTE(review): fragment only — "VideoFrame" is the cut-off return type of one function,
   and the clamped-conversion tail below belongs to another whose header is missing
   (presumably a DCPTime → ContentTime conversion such as dcp_to_content_subtitle, given
   the call site earlier in the file — confirm).  The '+' line adds the content's
   trim_start() to the converted time, i.e. a fix to account for start-trim. */
VideoFrame
s = DCPTime (max (int64_t (0), s.get ()));
s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
-	return ContentTime (s, piece->frc);
+	return ContentTime (s + piece->content->trim_start(), piece->frc);
}
/** Write these playback statistics to a log.
 *  Both lines are emitted at Log::TYPE_GENERAL, matching the two-argument
 *  log() signature used by the LOG_GENERAL macro earlier in this file.
 *  @param log Log to write to.
 */
void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
	log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat), Log::TYPE_GENERAL);
	log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()), Log::TYPE_GENERAL);
}
PlayerStatistics const &