}
void
-ABTranscoder::process_video (shared_ptr<Image> yuv, int frame, shared_ptr<Subtitle> sub, int index)
+ABTranscoder::process_video (shared_ptr<Image> yuv, SourceFrame frame, shared_ptr<Subtitle> sub, int index)
{
if (index == 0) {
/* Keep this image around until we get the other half */
#include <boost/shared_ptr.hpp>
#include <stdint.h>
+#include "util.h"
class Job;
class Encoder;
void go ();
private:
- void process_video (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>, int);
+ void process_video (boost::shared_ptr<Image>, SourceFrame, boost::shared_ptr<Subtitle>, int);
boost::shared_ptr<Film> _film_a;
boost::shared_ptr<Film> _film_b;
boost::shared_ptr<Encoder> _encoder;
boost::shared_ptr<Decoder> _da;
boost::shared_ptr<Decoder> _db;
- int _last_frame;
+ SourceFrame _last_frame;
boost::shared_ptr<Image> _image;
};
throw EncodeError ("cannot check hashes of a DCP with unknown length");
}
- int const N = _film->dcp_length().get();
+ SourceFrame const N = _film->dcp_trim_start() + _film->dcp_length().get();
DCPFrameRate const dfr = dcp_frame_rate (_film->frames_per_second ());
- for (int i = 0; i < N; i += dfr.skip) {
+ for (SourceFrame i = _film->dcp_trim_start(); i < N; i += dfr.skip) {
string const j2k_file = _opt->frame_out_path (i, false);
string const hash_file = j2k_file + ".md5";
DCPVideoFrame::DCPVideoFrame (
shared_ptr<const Image> yuv, shared_ptr<Subtitle> sub,
Size out, int p, int subtitle_offset, float subtitle_scale,
- Scaler const * s, int f, float fps, string pp, int clut, int bw, Log* l
+ Scaler const * s, SourceFrame f, float fps, string pp, int clut, int bw, Log* l
)
: _input (yuv)
, _subtitle (sub)
- * @param frame Frame index.
+ * @param frame Source frame index.
*/
void
-EncodedData::write (shared_ptr<const Options> opt, int frame)
+EncodedData::write (shared_ptr<const Options> opt, SourceFrame frame)
{
string const tmp_j2k = opt->frame_out_path (frame, true);
virtual ~EncodedData () {}
void send (boost::shared_ptr<Socket> socket);
- void write (boost::shared_ptr<const Options>, int);
+ void write (boost::shared_ptr<const Options>, SourceFrame);
/** @return data */
uint8_t* data () const {
class DCPVideoFrame
{
public:
- DCPVideoFrame (boost::shared_ptr<const Image>, boost::shared_ptr<Subtitle>, Size, int, int, float, Scaler const *, int, float, std::string, int, int, Log *);
+ DCPVideoFrame (
+ boost::shared_ptr<const Image>, boost::shared_ptr<Subtitle>, Size,
+ int, int, float, Scaler const *, SourceFrame, float, std::string, int, int, Log *
+ );
+
virtual ~DCPVideoFrame ();
boost::shared_ptr<EncodedData> encode_locally ();
boost::shared_ptr<EncodedData> encode_remotely (ServerDescription const *);
- int frame () const {
+ SourceFrame frame () const {
return _frame;
}
int _subtitle_offset;
float _subtitle_scale;
Scaler const * _scaler; ///< scaler to use
- int _frame; ///< frame index within the Film
+ SourceFrame _frame; ///< frame index within the Film's source
int _frames_per_second; ///< Frames per second that we will use for the DCP (rounded)
std::string _post_process; ///< FFmpeg post-processing string to use
int _colour_lut_index; ///< Colour look-up table to use (see Config::colour_lut_index ())
while (pass () == false) {
if (_job && _film->dcp_length()) {
- _job->set_progress (float ((_video_frame_index - _film->dcp_trim_start())) / _film->dcp_length().get());
+ SourceFrame const p = _video_frame_index - _film->dcp_trim_start();
+ _job->set_progress (float (p) / _film->dcp_length().get());
}
}
void go ();
/** @return the index of the last video frame to be processed */
- int video_frame_index () const {
+ SourceFrame video_frame_index () const {
return _video_frame_index;
}
}
/** Emitted when a video frame is ready.
* First parameter is the frame.
- * Second parameter is its index within the content.
+ * Second parameter is its index within the source.
* Third parameter is either 0 or a subtitle that should be on this frame.
*/
- boost::signals2::signal<void (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>)> Video;
+ boost::signals2::signal<void (boost::shared_ptr<Image>, SourceFrame, boost::shared_ptr<Subtitle>)> Video;
/** Emitted when some audio data is ready */
boost::signals2::signal<void (boost::shared_ptr<AudioBuffers>)> Audio;
void emit_audio (uint8_t* data, int size);
/** last video frame to be processed */
- int _video_frame_index;
+ SourceFrame _video_frame_index;
std::list<boost::shared_ptr<FilterGraph> > _filter_graphs;
}
/** @return Index of last frame to be successfully encoded */
-int
+SourceFrame
Encoder::last_frame () const
{
boost::mutex::scoped_lock (_history_mutex);
}
/** Should be called when a frame has been encoded successfully.
- * @param n Frame index.
+ * @param n Source frame index.
*/
void
-Encoder::frame_done (int n)
+Encoder::frame_done (SourceFrame n)
{
boost::mutex::scoped_lock lock (_history_mutex);
_just_skipped = false;
extern "C" {
#include <libavutil/samplefmt.h>
}
+#include "util.h"
class Options;
class Image;
/** Called with a frame of video.
* @param i Video frame image.
- * @param f Frame number within the film.
+ * @param f Frame number within the film's source.
* @param s A subtitle that should be on this frame, or 0.
*/
- virtual void process_video (boost::shared_ptr<const Image> i, int f, boost::shared_ptr<Subtitle> s) = 0;
+ virtual void process_video (boost::shared_ptr<const Image> i, SourceFrame f, boost::shared_ptr<Subtitle> s) = 0;
/** Called with some audio data.
* @param d Array of pointers to floating point sample data for each channel.
float current_frames_per_second () const;
bool skipping () const;
- int last_frame () const;
+ SourceFrame last_frame () const;
protected:
- void frame_done (int n);
+ void frame_done (SourceFrame n);
void frame_skipped ();
/** Film that we are encoding */
static int const _history_size;
/** true if the last frame we processed was skipped (because it was already done) */
bool _just_skipped;
- /** Index of the last frame to be processed */
- int _last_frame;
+ /** Source index of the last frame to be processed */
+ SourceFrame _last_frame;
};
#endif
}
string const tdir = _film->dir ("thumbs");
- vector<int> thumbs;
+ vector<SourceFrame> thumbs;
for (boost::filesystem::directory_iterator i = boost::filesystem::directory_iterator (tdir); i != boost::filesystem::directory_iterator(); ++i) {
return;
}
- set_thumbs (vector<int> ());
+ set_thumbs (vector<SourceFrame> ());
boost::filesystem::remove_all (dir ("thumbs"));
/* This call will recreate the directory */
/* Cached stuff; this is information about our content; we could
look it up each time, but that's slow.
*/
- for (vector<int>::const_iterator i = _thumbs.begin(); i != _thumbs.end(); ++i) {
+ for (vector<SourceFrame>::const_iterator i = _thumbs.begin(); i != _thumbs.end(); ++i) {
f << "thumb " << *i << "\n";
}
f << "width " << _size.width << "\n";
return thumb_file_for_frame (thumb_frame (n));
}
-/** @param n A frame index within the Film.
+/** @param n A frame index within the Film's source.
* @return The path to the thumb's image file for this frame;
* we assume that it exists.
*/
string
-Film::thumb_file_for_frame (int n) const
+Film::thumb_file_for_frame (SourceFrame n) const
{
return thumb_base_for_frame(n) + ".png";
}
-/** Must not be called with the _state_mutex locked */
+/** @param n Thumb index.
+ * Must not be called with the _state_mutex locked.
+ */
string
Film::thumb_base (int n) const
{
}
string
-Film::thumb_base_for_frame (int n) const
+Film::thumb_base_for_frame (SourceFrame n) const
{
stringstream s;
s.width (8);
}
/** @param n A thumb index.
- * @return The frame within the Film that it is for.
+ * @return The frame within the Film's source that it is for.
*
* Must not be called with the _state_mutex locked.
*/
-int
+SourceFrame
Film::thumb_frame (int n) const
{
boost::mutex::scoped_lock lm (_state_mutex);
return rint (t);
}
-boost::optional<int>
+boost::optional<SourceFrame>
Film::dcp_length () const
{
if (!length()) {
- return boost::optional<int> ();
+ return boost::optional<SourceFrame> ();
}
return length().get() - dcp_trim_start() - dcp_trim_end();
}
void
-Film::set_thumbs (vector<int> t)
+Film::set_thumbs (vector<SourceFrame> t)
{
{
boost::mutex::scoped_lock lm (_state_mutex);
}
void
-Film::set_length (int l)
+Film::set_length (SourceFrame l)
{
{
boost::mutex::scoped_lock lm (_state_mutex);
std::string thumb_file (int) const;
std::string thumb_base (int) const;
- int thumb_frame (int) const;
+ SourceFrame thumb_frame (int) const;
int target_audio_sample_rate () const;
void read_metadata ();
Size cropped_size (Size) const;
- boost::optional<int> dcp_length () const;
+ boost::optional<SourceFrame> dcp_length () const;
std::string dci_name () const;
std::string dcp_name () const;
return _scaler;
}
- int dcp_trim_start () const {
+ SourceFrame dcp_trim_start () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _dcp_trim_start;
}
- int dcp_trim_end () const {
+ SourceFrame dcp_trim_end () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _dcp_trim_end;
}
return _package_type;
}
- std::vector<int> thumbs () const {
+ std::vector<SourceFrame> thumbs () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _thumbs;
}
return _size;
}
- boost::optional<int> length () const {
+ boost::optional<SourceFrame> length () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _length;
}
void set_studio (std::string);
void set_facility (std::string);
void set_package_type (std::string);
- void set_thumbs (std::vector<int>);
+ void set_thumbs (std::vector<SourceFrame>);
void set_size (Size);
- void set_length (int);
+ void set_length (SourceFrame);
void unset_length ();
void set_audio_sample_rate (int);
void set_content_digest (std::string);
boost::gregorian::date _dci_date;
- std::string thumb_file_for_frame (int) const;
- std::string thumb_base_for_frame (int) const;
+ std::string thumb_file_for_frame (SourceFrame) const;
+ std::string thumb_base_for_frame (SourceFrame) const;
void signal_changed (Property);
void examine_content_finished ();
std::vector<Filter const *> _filters;
/** Scaler algorithm to use */
Scaler const * _scaler;
- int _dcp_trim_start;
- int _dcp_trim_end;
+ SourceFrame _dcp_trim_start;
+ SourceFrame _dcp_trim_end;
/** true to create an A/B comparison DCP, where the left half of the image
is the video without any filters or post-processing, and the right half
has the specified filters and post-processing.
/* Data which are cached to speed things up */
/** Vector of frame indices for each of our `thumbnails' */
- std::vector<int> _thumbs;
+ std::vector<SourceFrame> _thumbs;
/** Size, in pixels, of the source (ignoring cropping) */
Size _size;
/** Actual length of the source (in video frames) from examining it */
- boost::optional<int> _length;
+ boost::optional<SourceFrame> _length;
/** Sample rate of the source audio, in Hz */
int _audio_sample_rate;
/** MD5 digest of our content file */
}
void
-ImageMagickEncoder::process_video (shared_ptr<const Image> image, int frame, shared_ptr<Subtitle> sub)
+ImageMagickEncoder::process_video (shared_ptr<const Image> image, SourceFrame frame, shared_ptr<Subtitle> sub)
{
shared_ptr<Image> scaled = image->scale_and_convert_to_rgb (_opt->out_size, _opt->padding, _film->scaler());
shared_ptr<Image> compact (new CompactImage (scaled));
ImageMagickEncoder (boost::shared_ptr<const Film> f, boost::shared_ptr<const Options> o);
void process_begin (int64_t audio_channel_layout) {}
- void process_video (boost::shared_ptr<const Image>, int, boost::shared_ptr<Subtitle>);
+ void process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
void process_audio (boost::shared_ptr<const AudioBuffers>) {}
void process_end () {}
};
}
void
-J2KStillEncoder::process_video (shared_ptr<const Image> yuv, int frame, shared_ptr<Subtitle> sub)
+J2KStillEncoder::process_video (shared_ptr<const Image> yuv, SourceFrame frame, shared_ptr<Subtitle> sub)
{
pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
DCPVideoFrame* f = new DCPVideoFrame (
J2KStillEncoder (boost::shared_ptr<const Film>, boost::shared_ptr<const Options>);
void process_begin (int64_t audio_channel_layout) {}
- void process_video (boost::shared_ptr<const Image>, int, boost::shared_ptr<Subtitle>);
+ void process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
void process_audio (boost::shared_ptr<const AudioBuffers>) {}
void process_end () {}
};
}
void
-J2KWAVEncoder::process_video (shared_ptr<const Image> yuv, int frame, shared_ptr<Subtitle> sub)
+J2KWAVEncoder::process_video (shared_ptr<const Image> yuv, SourceFrame frame, shared_ptr<Subtitle> sub)
{
boost::mutex::scoped_lock lock (_worker_mutex);
~J2KWAVEncoder ();
void process_begin (int64_t audio_channel_layout);
- void process_video (boost::shared_ptr<const Image>, int, boost::shared_ptr<Subtitle>);
+ void process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
void process_audio (boost::shared_ptr<const AudioBuffers>);
void process_end ();
return String::compose ("Make DCP for %1", _film->name());
}
+/** @param f DCP frame index */
string
MakeDCPJob::j2c_path (int f) const
{
- return _opt->frame_out_path (f * dcp_frame_rate(_film->frames_per_second()).skip, false);
+ SourceFrame const s = (f * dcp_frame_rate(_film->frames_per_second()).skip) + _film->dcp_trim_start();
+ return _opt->frame_out_path (s, false);
}
string
int frames = 0;
switch (_film->content_type ()) {
case VIDEO:
+ /* Source frames -> DCP frames */
frames = _film->dcp_length().get() / dfr.skip;
break;
case STILL:
return _frame_out_path;
}
- /** @param f Frame index.
+ /** @param f Source frame index.
* @param t true to return a temporary file path, otherwise a permanent one.
* @return The path to write this video frame to.
*/
- std::string frame_out_path (int f, bool t, std::string e = "") const {
+ std::string frame_out_path (SourceFrame f, bool t, std::string e = "") const {
if (e.empty ()) {
e = _frame_out_extension;
}
float ratio; ///< ratio of the wanted output image (not considering padding)
int padding; ///< number of pixels of padding (in terms of the output size) each side of the image
bool apply_crop; ///< true to apply cropping
- int decode_video_skip; ///< skip frames such that we don't decode any frame where (index % decode_video_skip) != 0; e.g.
- ///< 1 for every frame, 2 for every other frame, etc.
+ /** Skip frames such that we don't decode any frame where (index % decode_video_skip) != 0; e.g.
+ * 1 for every frame, 2 for every other frame, etc.
+ */
+ SourceFrame decode_video_skip;
bool decode_audio; ///< true to decode audio, otherwise false
bool decode_subtitles;
}
/* We assume that dcp_length() is valid */
- return ((_film->dcp_length().get() - _encoder->last_frame()) / fps);
+ /* last_frame() is a source-frame index and encoding starts at dcp_trim_start(),
+    so the number of frames left is (trim_start + length) - last_frame. */
+ SourceFrame const left = _film->dcp_trim_start() + _film->dcp_length().get() - _encoder->last_frame();
+ return left / fps;
}
extern std::string md5_digest (void const *, int);
extern void ensure_ui_thread ();
+/** An index of a video frame within a piece of source content */
+typedef int SourceFrame;
+
struct DCPFrameRate
{
int frames_per_second;
table->Add (_trim_end, 1);
add_label_to_sizer (table, this, "frames");
+ if (_film->length()) {
+ _trim_start->SetRange (0, _film->length().get());
+ _trim_end->SetRange (0, _film->length().get());
+ }
+
_trim_start->SetValue (_film->dcp_trim_start());
_trim_end->SetValue (_film->dcp_trim_end());
{
shared_ptr<Film> f = new_test_film ("paths_test");
f->set_directory ("build/test/a/b/c/d/e");
- vector<int> thumbs;
+ vector<SourceFrame> thumbs;
thumbs.push_back (42);
f->set_thumbs (thumbs);
BOOST_CHECK_EQUAL (f->thumb_file (0), "build/test/a/b/c/d/e/thumbs/00000042.png");