}
void
-AnalyseAudioJob::audio (shared_ptr<const AudioBuffers> b, Time)
+AnalyseAudioJob::audio (shared_ptr<const AudioBuffers> b, DCPTime)
{
for (int i = 0; i < b->frames(); ++i) {
for (int j = 0; j < b->channels(); ++j) {
void run ();
private:
- void audio (boost::shared_ptr<const AudioBuffers>, Time);
+ void audio (boost::shared_ptr<const AudioBuffers>, DCPTime);
boost::weak_ptr<AudioContent> _content;
OutputAudioFrame _done;
int const AudioContentProperty::AUDIO_DELAY = 204;
int const AudioContentProperty::AUDIO_MAPPING = 205;
-AudioContent::AudioContent (shared_ptr<const Film> f, Time s)
+AudioContent::AudioContent (shared_ptr<const Film> f, DCPTime s)
: Content (f, s)
, _audio_gain (0)
, _audio_delay (0)
* the `controlling' video content is active.
*/
AudioContent::Frame
-AudioContent::time_to_content_audio_frames (Time t, Time at) const
+AudioContent::time_to_content_audio_frames (DCPTime t, DCPTime at) const
{
shared_ptr<const Film> film = _film.lock ();
assert (film);
public:
typedef int64_t Frame;
- AudioContent (boost::shared_ptr<const Film>, Time);
+ AudioContent (boost::shared_ptr<const Film>, DCPTime);
AudioContent (boost::shared_ptr<const Film>, boost::filesystem::path);
AudioContent (boost::shared_ptr<const Film>, boost::shared_ptr<const cxml::Node>);
AudioContent (boost::shared_ptr<const Film>, std::vector<boost::shared_ptr<Content> >);
return _audio_delay;
}
- Frame time_to_content_audio_frames (Time, Time) const;
+ Frame time_to_content_audio_frames (DCPTime, DCPTime) const;
private:
/** Gain to apply to audio in dB */
}
void
- clear (Time t)
+ clear (DCPTime t)
{
_last_pull = t;
_buffers.reset (new AudioBuffers (_buffers->channels(), 0));
}
-Content::Content (shared_ptr<const Film> f, Time p)
+Content::Content (shared_ptr<const Film> f, DCPTime p)
: _film (f)
, _position (p)
, _trim_start (0)
_paths.push_back ((*i)->content ());
}
_digest = node->string_child ("Digest");
- _position = node->number_child<Time> ("Position");
- _trim_start = node->number_child<Time> ("TrimStart");
- _trim_end = node->number_child<Time> ("TrimEnd");
+ _position = node->number_child<DCPTime> ("Position");
+ _trim_start = node->number_child<DCPTime> ("TrimStart");
+ _trim_end = node->number_child<DCPTime> ("TrimEnd");
}
Content::Content (shared_ptr<const Film> f, vector<shared_ptr<Content> > c)
}
void
-Content::set_position (Time p)
+Content::set_position (DCPTime p)
{
{
boost::mutex::scoped_lock lm (_mutex);
}
void
-Content::set_trim_start (Time t)
+Content::set_trim_start (DCPTime t)
{
{
boost::mutex::scoped_lock lm (_mutex);
}
void
-Content::set_trim_end (Time t)
+Content::set_trim_end (DCPTime t)
{
{
boost::mutex::scoped_lock lm (_mutex);
return String::compose ("%1 %2 %3", path_summary(), digest(), position());
}
-Time
+DCPTime
Content::length_after_trim () const
{
return full_length() - trim_start() - trim_end();
* @return true if this time is trimmed by our trim settings.
*/
bool
-Content::trimmed (Time t) const
+Content::trimmed (DCPTime t) const
{
return (t < trim_start() || t > (full_length() - trim_end ()));
}
{
public:
Content (boost::shared_ptr<const Film>);
- Content (boost::shared_ptr<const Film>, Time);
+ Content (boost::shared_ptr<const Film>, DCPTime);
Content (boost::shared_ptr<const Film>, boost::filesystem::path);
Content (boost::shared_ptr<const Film>, boost::shared_ptr<const cxml::Node>);
Content (boost::shared_ptr<const Film>, std::vector<boost::shared_ptr<Content> >);
virtual std::string technical_summary () const;
virtual std::string information () const = 0;
virtual void as_xml (xmlpp::Node *) const;
- virtual Time full_length () const = 0;
+ virtual DCPTime full_length () const = 0;
virtual std::string identifier () const;
boost::shared_ptr<Content> clone () const;
return _digest;
}
- void set_position (Time);
+ void set_position (DCPTime);
- /** Time that this content starts; i.e. the time that the first
+ /** DCPTime that this content starts; i.e. the time that the first
* bit of the content (trimmed or not) will happen.
*/
- Time position () const {
+ DCPTime position () const {
boost::mutex::scoped_lock lm (_mutex);
return _position;
}
- void set_trim_start (Time);
+ void set_trim_start (DCPTime);
- Time trim_start () const {
+ DCPTime trim_start () const {
boost::mutex::scoped_lock lm (_mutex);
return _trim_start;
}
- void set_trim_end (Time);
+ void set_trim_end (DCPTime);
- Time trim_end () const {
+ DCPTime trim_end () const {
boost::mutex::scoped_lock lm (_mutex);
return _trim_end;
}
- Time end () const {
+ DCPTime end () const {
return position() + length_after_trim() - 1;
}
- Time length_after_trim () const;
+ DCPTime length_after_trim () const;
void set_change_signals_frequent (bool f) {
_change_signals_frequent = f;
}
- bool trimmed (Time) const;
+ bool trimmed (DCPTime) const;
boost::signals2::signal<void (boost::weak_ptr<Content>, int, bool)> Changed;
private:
std::string _digest;
- Time _position;
- Time _trim_start;
- Time _trim_end;
+ DCPTime _position;
+ DCPTime _trim_start;
+ DCPTime _trim_end;
bool _change_signals_frequent;
};
* time. Pass accurate = true to try harder to get close to
* the request.
*/
- virtual void seek (Time time, bool accurate) = 0;
+ virtual void seek (DCPTime time, bool accurate) = 0;
virtual bool done () const = 0;
FFmpegStream::as_xml (root);
}
-Time
+DCPTime
FFmpegContent::full_length () const
{
shared_ptr<const Film> film = _film.lock ();
std::string technical_summary () const;
std::string information () const;
void as_xml (xmlpp::Node *) const;
- Time full_length () const;
+ DCPTime full_length () const;
std::string identifier () const;
}
bool
-FFmpegDecoder::seek_overrun_finished (Time seek) const
+FFmpegDecoder::seek_overrun_finished (DCPTime seek) const
{
return (
_video_position >= _ffmpeg_content->time_to_content_video_frames (seek) ||
}
void
-FFmpegDecoder::seek_and_flush (Time t)
+FFmpegDecoder::seek_and_flush (DCPTime t)
{
int64_t const initial_v = ((_ffmpeg_content->time_to_content_video_frames (t) / _ffmpeg_content->video_frame_rate()) - _video_pts_offset) /
av_q2d (_format_context->streams[_video_stream]->time_base);
}
void
-FFmpegDecoder::seek (Time time, bool accurate)
+FFmpegDecoder::seek (DCPTime time, bool accurate)
{
/* If we are doing an accurate seek, our initial shot will be 200ms (200 being
a number plucked from the air) earlier than we want to end up. The loop below
will hopefully then step through to where we want to be.
*/
- Time pre_roll = accurate ? (0.2 * TIME_HZ) : 0;
- Time initial_seek = time - pre_roll;
+ DCPTime pre_roll = accurate ? (0.2 * TIME_HZ) : 0;
+ DCPTime initial_seek = time - pre_roll;
if (initial_seek < 0) {
initial_seek = 0;
}
double const packet_time = static_cast<double> (sub.pts) / AV_TIME_BASE;
/* hence start time for this sub */
- Time const from = (packet_time + (double (sub.start_display_time) / 1e3)) * TIME_HZ;
- Time const to = (packet_time + (double (sub.end_display_time) / 1e3)) * TIME_HZ;
+ DCPTime const from = (packet_time + (double (sub.start_display_time) / 1e3)) * TIME_HZ;
+ DCPTime const to = (packet_time + (double (sub.end_display_time) / 1e3)) * TIME_HZ;
AVSubtitleRect const * rect = sub.rects[0];
~FFmpegDecoder ();
void pass ();
- void seek (Time time, bool);
+ void seek (DCPTime time, bool);
bool done () const;
private:
void maybe_add_subtitle ();
boost::shared_ptr<AudioBuffers> deinterleave_audio (uint8_t** data, int size);
- bool seek_overrun_finished (Time) const;
+ bool seek_overrun_finished (DCPTime) const;
bool seek_final_finished (int, int) const;
int minimal_run (boost::function<bool (int)>);
void seek_and_flush (int64_t);
_playlist->move_later (c);
}
-Time
+DCPTime
Film::length () const
{
return _playlist->length ();
}
FrameRateChange
-Film::active_frame_rate_change (Time t) const
+Film::active_frame_rate_change (DCPTime t) const
{
return _playlist->active_frame_rate_change (t, video_frame_rate ());
}
}
OutputAudioFrame
-Film::time_to_audio_frames (Time t) const
+Film::time_to_audio_frames (DCPTime t) const
{
return t * audio_frame_rate () / TIME_HZ;
}
OutputVideoFrame
-Film::time_to_video_frames (Time t) const
+Film::time_to_video_frames (DCPTime t) const
{
return t * video_frame_rate () / TIME_HZ;
}
-Time
+DCPTime
Film::audio_frames_to_time (OutputAudioFrame f) const
{
return f * TIME_HZ / audio_frame_rate ();
}
-Time
+DCPTime
Film::video_frames_to_time (OutputVideoFrame f) const
{
return f * TIME_HZ / video_frame_rate ();
OutputAudioFrame audio_frame_rate () const;
- OutputAudioFrame time_to_audio_frames (Time) const;
- OutputVideoFrame time_to_video_frames (Time) const;
- Time video_frames_to_time (OutputVideoFrame) const;
- Time audio_frames_to_time (OutputAudioFrame) const;
+ OutputAudioFrame time_to_audio_frames (DCPTime) const;
+ OutputVideoFrame time_to_video_frames (DCPTime) const;
+ DCPTime video_frames_to_time (OutputVideoFrame) const;
+ DCPTime audio_frames_to_time (OutputAudioFrame) const;
/* Proxies for some Playlist methods */
ContentList content () const;
- Time length () const;
+ DCPTime length () const;
bool has_subtitles () const;
OutputVideoFrame best_video_frame_rate () const;
bool content_paths_valid () const;
- FrameRateChange active_frame_rate_change (Time) const;
+ FrameRateChange active_frame_rate_change (DCPTime) const;
libdcp::KDM
make_kdm (
signal_changed (ContentProperty::LENGTH);
}
-Time
+DCPTime
ImageContent::full_length () const
{
shared_ptr<const Film> film = _film.lock ();
std::string summary () const;
std::string technical_summary () const;
void as_xml (xmlpp::Node *) const;
- Time full_length () const;
+ DCPTime full_length () const;
std::string identifier () const;
}
void
-ImageDecoder::seek (Time time, bool)
+ImageDecoder::seek (DCPTime time, bool)
{
_video_position = _video_content->time_to_content_video_frames (time);
}
/* Decoder */
void pass ();
- void seek (Time, bool);
+ void seek (DCPTime, bool);
bool done () const;
private:
}
}
/** @return Time (in seconds) that this sub-job has been running */
int
Job::elapsed_time () const
{
shared_ptr<Content> content;
shared_ptr<Decoder> decoder;
- /** Time of the last video we emitted relative to the start of the DCP */
- Time video_position;
- /** Time of the last audio we emitted relative to the start of the DCP */
- Time audio_position;
+ /** DCPTime of the last video we emitted relative to the start of the DCP */
+ DCPTime video_position;
+ /** DCPTime of the last audio we emitted relative to the start of the DCP */
+ DCPTime audio_position;
IncomingVideo repeat_video;
int repeat_to_do;
setup_pieces ();
}
- Time earliest_t = TIME_MAX;
+ DCPTime earliest_t = TIME_MAX;
shared_ptr<Piece> earliest;
enum {
VIDEO,
}
if (_audio) {
- boost::optional<Time> audio_done_up_to;
+ boost::optional<DCPTime> audio_done_up_to;
for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
if ((*i)->decoder->done ()) {
continue;
}
if (audio_done_up_to) {
- TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to.get ());
+ TimedAudioBuffers<DCPTime> tb = _audio_merger.pull (audio_done_up_to.get ());
Audio (tb.audio, tb.time);
_audio_position += _film->audio_frames_to_time (tb.audio->frames ());
}
/** @param extra Amount of extra time to add to the content frame's time (for repeat) */
void
-Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame, Time extra)
+Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame, DCPTime extra)
{
/* Keep a note of what came in so that we can repeat it if required */
_last_incoming_video.weak_piece = weak_piece;
return;
}
- Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
+ DCPTime const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
if (content->trimmed (relative_time)) {
return;
}
- Time const time = content->position() + relative_time + extra - content->trim_start ();
+ DCPTime const time = content->position() + relative_time + extra - content->trim_start ();
float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
libdcp::Size const image_size = fit_ratio_within (ratio, _video_container_size);
frame = ro.second;
}
- Time const relative_time = _film->audio_frames_to_time (frame);
+ DCPTime const relative_time = _film->audio_frames_to_time (frame);
if (content->trimmed (relative_time)) {
return;
}
- Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time - content->trim_start ();
+ DCPTime time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time - content->trim_start ();
/* Remap channels */
shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
void
Player::flush ()
{
- TimedAudioBuffers<Time> tb = _audio_merger.flush ();
+ TimedAudioBuffers<DCPTime> tb = _audio_merger.flush ();
if (tb.audio) {
Audio (tb.audio, tb.time);
_audio_position += _film->audio_frames_to_time (tb.audio->frames ());
* @return true on error
*/
void
-Player::seek (Time t, bool accurate)
+Player::seek (DCPTime t, bool accurate)
{
if (!_have_valid_pieces) {
setup_pieces ();
}
/* s is the offset of t from the start position of this content */
- Time s = t - vc->position ();
- s = max (static_cast<Time> (0), s);
+ DCPTime s = t - vc->position ();
+ s = max (static_cast<DCPTime> (0), s);
s = min (vc->length_after_trim(), s);
/* Hence set the piece positions to the `global' time */
}
void
-Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
+Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, DCPTime from, DCPTime to)
{
_in_subtitle.piece = weak_piece;
_in_subtitle.image = image;
Eyes eyes;
bool same;
VideoContent::Frame frame;
- Time extra;
+ DCPTime extra;
};
/** A wrapper for an Image which contains some pending operations; these may
void disable_audio ();
bool pass ();
- void seek (Time, bool);
+ void seek (DCPTime, bool);
- Time video_position () const {
+ DCPTime video_position () const {
return _video_position;
}
* Fourth parameter is true if the image is the same as the last one that was emitted.
* Fifth parameter is the time.
*/
- boost::signals2::signal<void (boost::shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, Time)> Video;
+ boost::signals2::signal<void (boost::shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, DCPTime)> Video;
/** Emitted when some audio data is ready */
- boost::signals2::signal<void (boost::shared_ptr<const AudioBuffers>, Time)> Audio;
+ boost::signals2::signal<void (boost::shared_ptr<const AudioBuffers>, DCPTime)> Audio;
/** Emitted when something has changed such that if we went back and emitted
* the last frame again it would look different. This is not emitted after
friend class PlayerWrapper;
friend class Piece;
- void process_video (boost::weak_ptr<Piece>, boost::shared_ptr<const Image>, Eyes, bool, VideoContent::Frame, Time);
+ void process_video (boost::weak_ptr<Piece>, boost::shared_ptr<const Image>, Eyes, bool, VideoContent::Frame, DCPTime);
void process_audio (boost::weak_ptr<Piece>, boost::shared_ptr<const AudioBuffers>, AudioContent::Frame);
- void process_subtitle (boost::weak_ptr<Piece>, boost::shared_ptr<Image>, dcpomatic::Rect<double>, Time, Time);
+ void process_subtitle (boost::weak_ptr<Piece>, boost::shared_ptr<Image>, dcpomatic::Rect<double>, DCPTime, DCPTime);
void setup_pieces ();
void playlist_changed ();
void content_changed (boost::weak_ptr<Content>, int, bool);
- void do_seek (Time, bool);
+ void do_seek (DCPTime, bool);
void flush ();
void emit_black ();
void emit_silence (OutputAudioFrame);
std::list<boost::shared_ptr<Piece> > _pieces;
/** The time after the last video that we emitted */
- Time _video_position;
+ DCPTime _video_position;
/** The time after the last audio that we emitted */
- Time _audio_position;
+ DCPTime _audio_position;
- AudioMerger<Time, AudioContent::Frame> _audio_merger;
+ AudioMerger<DCPTime, AudioContent::Frame> _audio_merger;
libdcp::Size _video_container_size;
boost::shared_ptr<PlayerImage> _black_frame;
boost::weak_ptr<Piece> piece;
boost::shared_ptr<Image> image;
dcpomatic::Rect<double> rect;
- Time from;
- Time to;
+ DCPTime from;
+ DCPTime to;
} _in_subtitle;
struct {
boost::shared_ptr<Image> image;
Position<int> position;
- Time from;
- Time to;
+ DCPTime from;
+ DCPTime to;
} _out_subtitle;
#ifdef DCPOMATIC_DEBUG
_sequencing_video = true;
ContentList cl = _content;
- Time next = 0;
+ DCPTime next = 0;
for (ContentList::iterator i = _content.begin(); i != _content.end(); ++i) {
if (!dynamic_pointer_cast<VideoContent> (*i)) {
continue;
return best->dcp;
}
-Time
+DCPTime
Playlist::length () const
{
- Time len = 0;
+ DCPTime len = 0;
for (ContentList::const_iterator i = _content.begin(); i != _content.end(); ++i) {
len = max (len, (*i)->end() + 1);
}
}
}
-Time
+DCPTime
Playlist::video_end () const
{
- Time end = 0;
+ DCPTime end = 0;
for (ContentList::const_iterator i = _content.begin(); i != _content.end(); ++i) {
if (dynamic_pointer_cast<const VideoContent> (*i)) {
end = max (end, (*i)->end ());
}
FrameRateChange
-Playlist::active_frame_rate_change (Time t, int dcp_video_frame_rate) const
+Playlist::active_frame_rate_change (DCPTime t, int dcp_video_frame_rate) const
{
for (ContentList::const_iterator i = _content.begin(); i != _content.end(); ++i) {
shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (*i);
void
Playlist::repeat (ContentList c, int n)
{
- pair<Time, Time> range (TIME_MAX, 0);
+ pair<DCPTime, DCPTime> range (TIME_MAX, 0);
for (ContentList::iterator i = c.begin(); i != c.end(); ++i) {
range.first = min (range.first, (*i)->position ());
range.second = max (range.second, (*i)->position ());
range.second = max (range.second, (*i)->end ());
}
- Time pos = range.second;
+ DCPTime pos = range.second;
for (int i = 0; i < n; ++i) {
for (ContentList::iterator i = c.begin(); i != c.end(); ++i) {
shared_ptr<Content> copy = (*i)->clone ();
return;
}
- Time const p = (*previous)->position ();
+ DCPTime const p = (*previous)->position ();
(*previous)->set_position (p + c->length_after_trim ());
c->set_position (p);
sort (_content.begin(), _content.end(), ContentSorter ());
return;
}
- Time const p = (*next)->position ();
+ DCPTime const p = (*next)->position ();
(*next)->set_position (c->position ());
c->set_position (p + c->length_after_trim ());
sort (_content.begin(), _content.end(), ContentSorter ());
std::string video_identifier () const;
- Time length () const;
+ DCPTime length () const;
int best_dcp_frame_rate () const;
- Time video_end () const;
- FrameRateChange active_frame_rate_change (Time, int dcp_frame_rate) const;
+ DCPTime video_end () const;
+ FrameRateChange active_frame_rate_change (DCPTime, int dcp_frame_rate) const;
void set_sequence_video (bool);
void maybe_sequence_video ();
_audio_mapping.as_xml (node->add_child("AudioMapping"));
}
-Time
+DCPTime
SndfileContent::full_length () const
{
shared_ptr<const Film> film = _film.lock ();
std::string technical_summary () const;
std::string information () const;
void as_xml (xmlpp::Node *) const;
- Time full_length () const;
+ DCPTime full_length () const;
/* AudioContent */
int audio_channels () const {
}
void
-SndfileDecoder::seek (Time t, bool accurate)
+SndfileDecoder::seek (DCPTime t, bool accurate)
{
/* XXX */
}
~SndfileDecoder ();
void pass ();
- void seek (Time, bool);
+ void seek (DCPTime, bool);
bool done () const;
int audio_channels () const;
* Image may be 0 to say that there is no current subtitle.
*/
void
-SubtitleDecoder::subtitle (shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
+SubtitleDecoder::subtitle (shared_ptr<Image> image, dcpomatic::Rect<double> rect, DCPTime from, DCPTime to)
{
Subtitle (image, rect, from, to);
}
#include "types.h"
class Film;
class TimedSubtitle;
class Image;
class SubtitleDecoder : public virtual Decoder
public:
SubtitleDecoder (boost::shared_ptr<const Film>);
- boost::signals2::signal<void (boost::shared_ptr<Image>, dcpomatic::Rect<double>, Time, Time)> Subtitle;
+ boost::signals2::signal<void (boost::shared_ptr<Image>, dcpomatic::Rect<double>, DCPTime, DCPTime)> Subtitle;
protected:
- void subtitle (boost::shared_ptr<Image>, dcpomatic::Rect<double>, Time, Time);
+ void subtitle (boost::shared_ptr<Image>, dcpomatic::Rect<double>, DCPTime, DCPTime);
};
using namespace std;
/** @param n Name to use when giving output */
PeriodTimer::PeriodTimer (string n)
: _name (n)
{
gettimeofday (&_start, 0);
}
/** Destroy PeriodTimer and output the time elapsed since its construction */
PeriodTimer::~PeriodTimer ()
{
struct timeval stop;
gettimeofday (&stop, 0);
/** @param n Name to use when giving output.
* @param s Initial state.
*/
StateTimer::StateTimer (string n, string s)
: _name (n)
{
struct timeval t;
/** @param s New state that the caller is in */
void
StateTimer::set_state (string s)
{
double const last = _time;
struct timeval t;
_state = s;
}
/** Destroy StateTimer and generate a summary of the state timings on cout */
StateTimer::~StateTimer ()
{
if (_state.empty ()) {
return;
#include <map>
#include <sys/time.h>
/** @class PeriodTimer
* @brief A class to allow timing of a period within the caller.
*
* On destruction, it will output the time since its construction.
*/
class PeriodTimer
{
public:
PeriodTimer (std::string n);
~PeriodTimer ();
private:
struct timeval _start;
};
/** @class StateTimer
* @brief A class to allow measurement of the amount of time a program
* spends in one of a set of states.
*
* Once constructed, the caller can call set_state() whenever
* its state changes. When StateTimer is destroyed, it will
* output (to cout) a summary of the time spent in each state.
*/
class StateTimer
{
public:
StateTimer (std::string n, std::string s);
~StateTimer ();
void set_state (std::string s);
*/
#define SERVER_LINK_VERSION 1
-typedef int64_t Time;
+typedef int64_t DCPTime;
#define TIME_MAX INT64_MAX
-#define TIME_HZ ((Time) 96000)
+#define TIME_HZ ((DCPTime) 96000)
+typedef int64_t ContentTime;
typedef int64_t OutputAudioFrame;
typedef int OutputVideoFrame;
typedef std::vector<boost::shared_ptr<Content> > ContentList;
setup_default_colour_conversion ();
}
-VideoContent::VideoContent (shared_ptr<const Film> f, Time s, VideoContent::Frame len)
+VideoContent::VideoContent (shared_ptr<const Film> f, DCPTime s, VideoContent::Frame len)
: Content (f, s)
, _video_length (len)
, _video_frame_rate (0)
* is that of the next complete frame which starts after `t'.
*/
VideoContent::Frame
-VideoContent::time_to_content_video_frames (Time t) const
+VideoContent::time_to_content_video_frames (DCPTime t) const
{
shared_ptr<const Film> film = _film.lock ();
assert (film);
typedef int Frame;
VideoContent (boost::shared_ptr<const Film>);
- VideoContent (boost::shared_ptr<const Film>, Time, VideoContent::Frame);
+ VideoContent (boost::shared_ptr<const Film>, DCPTime, VideoContent::Frame);
VideoContent (boost::shared_ptr<const Film>, boost::filesystem::path);
VideoContent (boost::shared_ptr<const Film>, boost::shared_ptr<const cxml::Node>);
VideoContent (boost::shared_ptr<const Film>, std::vector<boost::shared_ptr<Content> >);
libdcp::Size video_size_after_3d_split () const;
libdcp::Size video_size_after_crop () const;
- VideoContent::Frame time_to_content_video_frames (Time) const;
+ VideoContent::Frame time_to_content_video_frames (DCPTime) const;
protected:
void take_from_video_examiner (boost::shared_ptr<VideoExaminer>);
static int frame = 0;
void
-process_video (shared_ptr<PlayerImage> image, Eyes eyes, ColourConversion conversion, Time)
+process_video (shared_ptr<PlayerImage> image, Eyes eyes, ColourConversion conversion, DCPTime)
{
shared_ptr<DCPVideoFrame> local (new DCPVideoFrame (image->image(), frame, eyes, conversion, film->video_frame_rate(), 250000000, log_));
shared_ptr<DCPVideoFrame> remote (new DCPVideoFrame (image->image(), frame, eyes, conversion, film->video_frame_rate(), 250000000, log_));
gc->SetPen (*wxLIGHT_GREY_PEN);
gc->StrokePath (grid);
gc->DrawText (_("Time"), data_width, _height - _y_origin + db_label_height / 2);
if (_type_visible[AudioPoint::PEAK]) {
for (int c = 0; c < MAX_AUDIO_CHANNELS; ++c) {
b->Add (_content_earlier, 1, wxEXPAND);
_content_later = new wxButton (_content_panel, wxID_DOWN);
b->Add (_content_later, 1, wxEXPAND);
_content_timeline = new wxButton (_content_panel, wxID_ANY, _("Timeline..."));
b->Add (_content_timeline, 1, wxEXPAND | wxLEFT | wxRIGHT);
s->Add (b, 0, wxALL, 4);
_timeline_dialog = 0;
}
_timeline_dialog = new TimelineDialog (this, _film);
_timeline_dialog->Show ();
}
class wxListCtrl;
class wxListEvent;
class Film;
class TimelineDialog;
class Ratio;
class Timecode;
class FilmEditorPanel;
class SubtitleContent;
std::vector<Ratio const *> _ratios;
bool _generally_sensitive;
TimelineDialog* _timeline_dialog;
};
fetch_next_frame ();
- Time const len = _film->length ();
+ DCPTime const len = _film->length ();
if (len) {
int const new_slider_position = 4096 * _player->video_position() / len;
}
void
-FilmViewer::process_video (shared_ptr<PlayerImage> image, Eyes eyes, Time t)
+FilmViewer::process_video (shared_ptr<PlayerImage> image, Eyes eyes, DCPTime t)
{
if (eyes == EYES_RIGHT) {
return;
}
void
-FilmViewer::set_position_text (Time t)
+FilmViewer::set_position_text (DCPTime t)
{
if (!_film) {
_frame_number->SetLabel ("0");
We want to see the one before it, so we need to go back 2.
*/
- Time p = _player->video_position() - _film->video_frames_to_time (2);
+ DCPTime p = _player->video_position() - _film->video_frames_to_time (2);
if (p < 0) {
p = 0;
}
void slider_moved ();
void play_clicked ();
void timer ();
- void process_video (boost::shared_ptr<PlayerImage>, Eyes, Time);
+ void process_video (boost::shared_ptr<PlayerImage>, Eyes, DCPTime);
void calculate_sizes ();
void check_play_state ();
void fetch_current_frame_again ();
void back_clicked ();
void forward_clicked ();
void player_changed (bool);
- void set_position_text (Time);
+ void set_position_text (DCPTime);
boost::shared_ptr<Film> _film;
boost::shared_ptr<Player> _player;
using std::cout;
using boost::lexical_cast;
Timecode::Timecode (wxWindow* parent)
: wxPanel (parent)
{
wxClientDC dc (parent);
_fixed = add_label_to_sizer (_sizer, this, wxT ("42"), false);
_hours->Bind (wxEVT_COMMAND_TEXT_UPDATED, boost::bind (&Timecode::changed, this));
_minutes->Bind (wxEVT_COMMAND_TEXT_UPDATED, boost::bind (&Timecode::changed, this));
_seconds->Bind (wxEVT_COMMAND_TEXT_UPDATED, boost::bind (&Timecode::changed, this));
_frames->Bind (wxEVT_COMMAND_TEXT_UPDATED, boost::bind (&Timecode::changed, this));
_set_button->Bind (wxEVT_COMMAND_BUTTON_CLICKED, boost::bind (&Timecode::set_clicked, this));
_set_button->Enable (false);
}
void
-Timecode::set (Time t, int fps)
+Timecode::set (DCPTime t, int fps)
{
int const h = t / (3600 * TIME_HZ);
t -= h * 3600 * TIME_HZ;
_fixed->SetLabel (wxString::Format ("%02d:%02d:%02d.%02d", h, m, s, f));
}
-Time
+DCPTime
Timecode::get (int fps) const
{
- Time t = 0;
+ DCPTime t = 0;
string const h = wx_to_std (_hours->GetValue ());
t += lexical_cast<int> (h.empty() ? "0" : h) * 3600 * TIME_HZ;
string const m = wx_to_std (_minutes->GetValue());
}
void
Timecode::changed ()
{
_set_button->Enable (true);
}
void
Timecode::set_clicked ()
{
Changed ();
_set_button->Enable (false);
}
void
Timecode::set_editable (bool e)
{
_editable->Show (e);
_fixed->Show (!e);
#include <wx/wx.h>
#include "lib/types.h"
class Timecode : public wxPanel
{
public:
Timecode (wxWindow *);
- void set (Time, int);
- Time get (int) const;
+ void set (DCPTime, int);
+ DCPTime get (int) const;
void set_editable (bool);
class View : public boost::noncopyable
{
public:
View (Timeline& t)
: _timeline (t)
{
protected:
virtual void do_paint (wxGraphicsContext *) = 0;
- int time_x (Time t) const
+ int time_x (DCPTime t) const
{
return _timeline.tracks_position().x + t * _timeline.pixels_per_time_unit();
}
Timeline& _timeline;
private:
dcpomatic::Rect<int> _last_paint_bbox;
class ContentView : public View
{
public:
ContentView (Timeline& tl, shared_ptr<Content> c)
: View (tl)
, _content (c)
, _track (0)
return;
}
- Time const position = cont->position ();
- Time const len = cont->length_after_trim ();
+ DCPTime const position = cont->position ();
+ DCPTime const len = cont->length_after_trim ();
wxColour selected (colour().Red() / 2, colour().Green() / 2, colour().Blue() / 2);
class AudioContentView : public ContentView
{
public:
AudioContentView (Timeline& tl, shared_ptr<Content> c)
: ContentView (tl, c)
{}
class VideoContentView : public ContentView
{
public:
VideoContentView (Timeline& tl, shared_ptr<Content> c)
: ContentView (tl, c)
{}
}
};
class TimeAxisView : public View
{
public:
TimeAxisView (Timeline& tl, int y)
: View (tl)
, _y (y)
{}
path.AddLineToPoint (_timeline.width(), _y);
gc->StrokePath (path);
- Time t = 0;
+ DCPTime t = 0;
while ((t * _timeline.pixels_per_time_unit()) < _timeline.width()) {
wxGraphicsPath path = gc->CreatePath ();
path.MoveToPoint (time_x (t), _y - 4);
};
Timeline::Timeline (wxWindow* parent, FilmEditor* ed, shared_ptr<Film> film)
: wxPanel (parent, wxID_ANY, wxDefaultPosition, wxDefaultSize, wxFULL_REPAINT_ON_RESIZE)
, _film_editor (ed)
, _film (film)
, _time_axis_view (new TimeAxisView (*this, 32))
, _tracks (0)
, _pixels_per_time_unit (0)
, _left_down (false)
SetDoubleBuffered (true);
#endif
- Bind (wxEVT_PAINT, boost::bind (&Timeline::paint, this));
- Bind (wxEVT_LEFT_DOWN, boost::bind (&Timeline::left_down, this, _1));
- Bind (wxEVT_LEFT_UP, boost::bind (&Timeline::left_up, this, _1));
- Bind (wxEVT_RIGHT_DOWN, boost::bind (&Timeline::right_down, this, _1));
- Bind (wxEVT_MOTION, boost::bind (&Timeline::mouse_moved, this, _1));
- Bind (wxEVT_SIZE, boost::bind (&Timeline::resized, this));
+ Bind (wxEVT_PAINT, boost::bind (&DCPTimeline::paint, this));
+ Bind (wxEVT_LEFT_DOWN, boost::bind (&DCPTimeline::left_down, this, _1));
+ Bind (wxEVT_LEFT_UP, boost::bind (&DCPTimeline::left_up, this, _1));
+ Bind (wxEVT_RIGHT_DOWN, boost::bind (&DCPTimeline::right_down, this, _1));
+ Bind (wxEVT_MOTION, boost::bind (&DCPTimeline::mouse_moved, this, _1));
+ Bind (wxEVT_SIZE, boost::bind (&DCPTimeline::resized, this));
playlist_changed ();
SetMinSize (wxSize (640, tracks() * track_height() + 96));
- _playlist_connection = film->playlist()->Changed.connect (bind (&Timeline::playlist_changed, this));
+ _playlist_connection = film->playlist()->Changed.connect (bind (&DCPTimeline::playlist_changed, this));
}
void
-Timeline::paint ()
+DCPTimeline::paint ()
{
wxPaintDC dc (this);
}
void
-Timeline::playlist_changed ()
+DCPTimeline::playlist_changed ()
{
ensure_ui_thread ();
}
void
-Timeline::assign_tracks ()
+DCPTimeline::assign_tracks ()
{
for (ViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
shared_ptr<ContentView> cv = dynamic_pointer_cast<ContentView> (*i);
}
int
-Timeline::tracks () const
+DCPTimeline::tracks () const
{
return _tracks;
}
void
-Timeline::setup_pixels_per_time_unit ()
+DCPTimeline::setup_pixels_per_time_unit ()
{
shared_ptr<const Film> film = _film.lock ();
if (!film) {
}
shared_ptr<View>
-Timeline::event_to_view (wxMouseEvent& ev)
+DCPTimeline::event_to_view (wxMouseEvent& ev)
{
ViewList::iterator i = _views.begin();
Position<int> const p (ev.GetX(), ev.GetY());
}
void
-Timeline::left_down (wxMouseEvent& ev)
+DCPTimeline::left_down (wxMouseEvent& ev)
{
shared_ptr<View> view = event_to_view (ev);
shared_ptr<ContentView> content_view = dynamic_pointer_cast<ContentView> (view);
}
void
-Timeline::left_up (wxMouseEvent& ev)
+DCPTimeline::left_up (wxMouseEvent& ev)
{
_left_down = false;
}
void
-Timeline::mouse_moved (wxMouseEvent& ev)
+DCPTimeline::mouse_moved (wxMouseEvent& ev)
{
if (!_left_down) {
return;
}
void
-Timeline::right_down (wxMouseEvent& ev)
+DCPTimeline::right_down (wxMouseEvent& ev)
{
shared_ptr<View> view = event_to_view (ev);
shared_ptr<ContentView> cv = dynamic_pointer_cast<ContentView> (view);
}
void
-Timeline::set_position_from_event (wxMouseEvent& ev)
+DCPTimeline::set_position_from_event (wxMouseEvent& ev)
{
wxPoint const p = ev.GetPosition();
return;
}
- Time new_position = _down_view_position + (p.x - _down_point.x) / _pixels_per_time_unit;
+ DCPTime new_position = _down_view_position + (p.x - _down_point.x) / _pixels_per_time_unit;
if (_snap) {
bool first = true;
- Time nearest_distance = TIME_MAX;
- Time nearest_new_position = TIME_MAX;
+ DCPTime nearest_distance = TIME_MAX;
+ DCPTime nearest_new_position = TIME_MAX;
/* Find the nearest content edge; this is inefficient */
for (ViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
{
/* Snap starts to ends */
- Time const d = abs (cv->content()->end() - new_position);
+ DCPTime const d = abs (cv->content()->end() - new_position);
if (first || d < nearest_distance) {
nearest_distance = d;
nearest_new_position = cv->content()->end();
{
/* Snap ends to starts */
- Time const d = abs (cv->content()->position() - (new_position + _down_view->content()->length_after_trim()));
+ DCPTime const d = abs (cv->content()->position() - (new_position + _down_view->content()->length_after_trim()));
if (d < nearest_distance) {
nearest_distance = d;
nearest_new_position = cv->content()->position() - _down_view->content()->length_after_trim ();
}
void
-Timeline::force_redraw (dcpomatic::Rect<int> const & r)
+DCPTimeline::force_redraw (dcpomatic::Rect<int> const & r)
{
RefreshRect (wxRect (r.x, r.y, r.width, r.height), false);
}
shared_ptr<const Film>
-Timeline::film () const
+DCPTimeline::film () const
{
return _film.lock ();
}
void
-Timeline::resized ()
+DCPTimeline::resized ()
{
setup_pixels_per_time_unit ();
}
void
-Timeline::clear_selection ()
+DCPTimeline::clear_selection ()
{
for (ViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
shared_ptr<ContentView> cv = dynamic_pointer_cast<ContentView> (*i);
}
}
-Timeline::ContentViewList
-Timeline::selected_views () const
+DCPTimeline::ContentViewList
+DCPTimeline::selected_views () const
{
ContentViewList sel;
}
ContentList
-Timeline::selected_content () const
+DCPTimeline::selected_content () const
{
ContentList sel;
ContentViewList views = selected_views ();
class View;
class ContentView;
class FilmEditor;
-class TimeAxisView;
+class DCPTimeAxisView;
-class Timeline : public wxPanel
+class DCPTimeline : public wxPanel
{
public:
- Timeline (wxWindow *, FilmEditor *, boost::shared_ptr<Film>);
+ DCPTimeline (wxWindow *, FilmEditor *, boost::shared_ptr<Film>);
boost::shared_ptr<const Film> film () const;
FilmEditor* _film_editor;
boost::weak_ptr<Film> _film;
ViewList _views;
- boost::shared_ptr<TimeAxisView> _time_axis_view;
+ boost::shared_ptr<DCPTimeAxisView> _time_axis_view;
int _tracks;
double _pixels_per_time_unit;
bool _left_down;
wxPoint _down_point;
boost::shared_ptr<ContentView> _down_view;
- Time _down_view_position;
+ DCPTime _down_view_position;
bool _first_move;
ContentMenu _menu;
bool _snap;
using std::cout;
using boost::shared_ptr;
-TimelineDialog::TimelineDialog (FilmEditor* ed, shared_ptr<Film> film)
- : wxDialog (ed, wxID_ANY, _("Timeline"), wxDefaultPosition, wxSize (640, 512), wxDEFAULT_DIALOG_STYLE | wxRESIZE_BORDER | wxFULL_REPAINT_ON_RESIZE)
+DCPTimelineDialog::DCPTimelineDialog (FilmEditor* ed, shared_ptr<Film> film)
+ : wxDialog (ed, wxID_ANY, _("Timeline"), wxDefaultPosition, wxSize (640, 512), wxDEFAULT_DIALOG_STYLE | wxRESIZE_BORDER | wxFULL_REPAINT_ON_RESIZE)
, _timeline (this, ed, film)
{
wxBoxSizer* sizer = new wxBoxSizer (wxVERTICAL);
sizer->SetSizeHints (this);
_snap->SetValue (_timeline.snap ());
- _snap->Bind (wxEVT_COMMAND_CHECKBOX_CLICKED, boost::bind (&TimelineDialog::snap_toggled, this));
+ _snap->Bind (wxEVT_COMMAND_CHECKBOX_CLICKED, boost::bind (&DCPTimelineDialog::snap_toggled, this));
}
void
-TimelineDialog::snap_toggled ()
+DCPTimelineDialog::snap_toggled ()
{
_timeline.set_snap (_snap->GetValue ());
}
class Playlist;
-class TimelineDialog : public wxDialog
+class DCPTimelineDialog : public wxDialog
{
public:
- TimelineDialog (FilmEditor *, boost::shared_ptr<Film>);
+ DCPTimelineDialog (FilmEditor *, boost::shared_ptr<Film>);
private:
void snap_toggled ();
- Timeline _timeline;
+ DCPTimeline _timeline;
wxCheckBox* _snap;
};
_sizer->Add (grid, 0, wxALL, 8);
add_label_to_sizer (grid, this, _("Position"), true);
- _position = new Timecode (this);
+ _position = new DCPTimecode (this);
grid->Add (_position);
add_label_to_sizer (grid, this, _("Full length"), true);
- _full_length = new Timecode (this);
+ _full_length = new DCPTimecode (this);
grid->Add (_full_length);
add_label_to_sizer (grid, this, _("Trim from start"), true);
- _trim_start = new Timecode (this);
+ _trim_start = new DCPTimecode (this);
grid->Add (_trim_start);
add_label_to_sizer (grid, this, _("Trim from end"), true);
- _trim_end = new Timecode (this);
+ _trim_end = new DCPTimecode (this);
grid->Add (_trim_end);
add_label_to_sizer (grid, this, _("Play length"), true);
- _play_length = new Timecode (this);
+ _play_length = new DCPTimecode (this);
grid->Add (_play_length);
_position->Changed.connect (boost::bind (&TimingPanel::position_changed, this));
#include "film_editor_panel.h"
-class Timecode;
+class DCPTimecode;
class TimingPanel : public FilmEditorPanel
{
void trim_end_changed ();
void play_length_changed ();
- Timecode* _position;
- Timecode* _full_length;
- Timecode* _trim_start;
- Timecode* _trim_end;
- Timecode* _play_length;
+ DCPTimecode* _position;
+ DCPTimecode* _full_length;
+ DCPTimecode* _trim_start;
+ DCPTimecode* _trim_end;
+ DCPTimecode* _play_length;
};
#define FFMPEG_SEEK_TEST_DEBUG 1
-boost::optional<Time> first_video;
-boost::optional<Time> first_audio;
+boost::optional<DCPTime> first_video;
+boost::optional<DCPTime> first_audio;
static void
-process_video (shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, Time t)
+process_video (shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, DCPTime t)
{
if (!first_video) {
first_video = t;
}
static void
-process_audio (shared_ptr<const AudioBuffers>, Time t)
+process_audio (shared_ptr<const AudioBuffers>, DCPTime t)
{
if (!first_audio) {
first_audio = t;
}
static string
-print_time (Time t, float fps)
+print_time (DCPTime t, float fps)
{
stringstream s;
s << t << " " << (float(t) / TIME_HZ) << "s " << (float(t) * fps / TIME_HZ) << "f";
}
static void
-check (shared_ptr<Player> p, Time t)
+check (shared_ptr<Player> p, DCPTime t)
{
first_video.reset ();
first_audio.reset ();
#define LONG_FFMPEG_SEEK_TEST_DEBUG 1
-boost::optional<Time> first_video;
-boost::optional<Time> first_audio;
+boost::optional<DCPTime> first_video;
+boost::optional<DCPTime> first_audio;
static void
-process_video (shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, Time t)
+process_video (shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, DCPTime t)
{
if (!first_video) {
first_video = t;
}
static void
-process_audio (shared_ptr<const AudioBuffers>, Time t)
+process_audio (shared_ptr<const AudioBuffers>, DCPTime t)
{
if (!first_audio) {
first_audio = t;
}
static string
-print_time (Time t, float fps)
+print_time (DCPTime t, float fps)
{
stringstream s;
s << t << " " << (float(t) / TIME_HZ) << "s " << (float(t) * fps / TIME_HZ) << "f";
}
static void
-check (shared_ptr<Player> p, Time t)
+check (shared_ptr<Player> p, DCPTime t)
{
first_video.reset ();
first_audio.reset ();
{
boost::shared_ptr<Content> content;
boost::shared_ptr<const Image> image;
- Time time;
+ DCPTime time;
};
class PlayerWrapper
_player->Video.connect (bind (&PlayerWrapper::process_video, this, _1, _2, _5));
}
- void process_video (shared_ptr<PlayerImage> i, bool, Time t)
+ void process_video (shared_ptr<PlayerImage> i, bool, DCPTime t)
{
Video v;
v.content = _player->_last_video;
return v;
}
- void seek (Time t, bool ac)
+ void seek (DCPTime t, bool ac)
{
_player->seek (t, ac);
_queue.clear ();