{
_bad = 0;
- if (!_film->dcp_length()) {
- throw EncodeError ("cannot check hashes of a DCP with unknown length");
+ if (!_film->dcp_intrinsic_duration()) {
+ throw EncodeError ("cannot check hashes of a DCP with unknown intrinsic duration");
}
- SourceFrame const N = _film->trim_start() + _film->dcp_length().get();
- DCPFrameRate const dfr (_film->frames_per_second ());
-
- int const inc = dfr.skip ? 2 : 1;
-
- for (SourceFrame i = _film->trim_start(); i < N; i += inc) {
+ int const N = _film->dcp_intrinsic_duration().get();
+ for (int i = 0; i < N; ++i) {
string const j2k_file = _film->frame_out_path (i, false);
string const hash_file = _film->hash_out_path (i, false);
using std::stringstream;
using std::ofstream;
using boost::shared_ptr;
+using libdcp::Size;
/** Construct a DCP video frame.
* @param input Input image.
* @param out Required size of output, in pixels (including any padding).
* @param s Scaler to use.
* @param p Number of pixels of padding either side of the image.
- * @param f Index of the frame within the Film's source.
+ * @param f Index of the frame within the DCP's intrinsic duration.
* @param fps Frames per second of the Film's source.
* @param pp FFmpeg post-processing string to use.
* @param clut Colour look-up table to use (see Config::colour_lut_index ())
DCPVideoFrame::DCPVideoFrame (
shared_ptr<const Image> yuv, shared_ptr<Subtitle> sub,
Size out, int p, int subtitle_offset, float subtitle_scale,
- Scaler const * s, SourceFrame f, float fps, string pp, int clut, int bw, Log* l
+ Scaler const * s, int f, float fps, string pp, int clut, int bw, Log* l
)
: _input (yuv)
, _subtitle (sub)
/** Write this data to a J2K file.
* @param opt Options.
- * @param frame Frame index.
+ * @param frame DCP frame index.
*/
void
-EncodedData::write (shared_ptr<const Film> film, SourceFrame frame)
+EncodedData::write (shared_ptr<const Film> film, int frame)
{
string const tmp_j2k = film->frame_out_path (frame, true);
virtual ~EncodedData () {}
void send (boost::shared_ptr<Socket> socket);
- void write (boost::shared_ptr<const Film>, SourceFrame);
+ void write (boost::shared_ptr<const Film>, int);
/** @return data */
uint8_t* data () const {
{
public:
DCPVideoFrame (
- boost::shared_ptr<const Image>, boost::shared_ptr<Subtitle>, Size,
- int, int, float, Scaler const *, SourceFrame, float, std::string, int, int, Log *
+ boost::shared_ptr<const Image>, boost::shared_ptr<Subtitle>, libdcp::Size,
+ int, int, float, Scaler const *, int, float, std::string, int, int, Log *
);
virtual ~DCPVideoFrame ();
boost::shared_ptr<EncodedData> encode_locally ();
boost::shared_ptr<EncodedData> encode_remotely (ServerDescription const *);
- SourceFrame frame () const {
+ int frame () const {
return _frame;
}
boost::shared_ptr<const Image> _input; ///< the input image
boost::shared_ptr<Subtitle> _subtitle; ///< any subtitle that should be on the image
- Size _out_size; ///< the required size of the output, in pixels
+ libdcp::Size _out_size; ///< the required size of the output, in pixels
int _padding;
int _subtitle_offset;
float _subtitle_scale;
Scaler const * _scaler; ///< scaler to use
- SourceFrame _frame; ///< frame index within the Film's source
+ int _frame; ///< frame index within the DCP's intrinsic duration
int _frames_per_second; ///< Frames per second that we will use for the DCP (rounded)
std::string _post_process; ///< FFmpeg post-processing string to use
int _colour_lut; ///< Colour look-up table to use
Encoder::Encoder (shared_ptr<const Film> f)
: _film (f)
, _just_skipped (false)
- , _video_frame (0)
- , _audio_frame (0)
+ , _video_frames_in (0)
+ , _audio_frames_in (0)
+ , _video_frames_out (0)
+ , _audio_frames_out (0)
#ifdef HAVE_SWRESAMPLE
, _swr_context (0)
#endif
- , _audio_frames_written (0)
, _process_end (false)
{
if (_film->audio_stream()) {
return _just_skipped;
}
-/** @return Number of video frames that have been received */
-SourceFrame
-Encoder::video_frame () const
+/** @return Number of video frames that have been sent out */
+int
+Encoder::video_frames_out () const
{
- boost::mutex::scoped_lock (_history_mutex);
+ boost::mutex::scoped_lock lm (_history_mutex);
- return _video_frame;
+ return _video_frames_out;
}
/** Should be called when a frame has been encoded successfully.
{
DCPFrameRate dfr (_film->frames_per_second ());
- if (dfr.skip && (_video_frame % 2)) {
- ++_video_frame;
+ if (dfr.skip && (_video_frames_in % 2)) {
+ ++_video_frames_in;
return;
}
}
/* Only do the processing if we don't already have a file for this frame */
- if (boost::filesystem::exists (_film->frame_out_path (_video_frame, false))) {
+ if (boost::filesystem::exists (_film->frame_out_path (_video_frames_out, false))) {
frame_skipped ();
return;
}
as on windows the link is really a copy and the reference frame might not have
finished encoding yet.
*/
- _links_required.push_back (make_pair (_last_real_frame.get(), _video_frame));
+ _links_required.push_back (make_pair (_last_real_frame.get(), _video_frames_out));
} else {
/* Queue this new frame for encoding */
pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
new DCPVideoFrame (
image, sub, _film->format()->dcp_size(), _film->format()->dcp_padding (_film),
_film->subtitle_offset(), _film->subtitle_scale(),
- _film->scaler(), _video_frame, _film->frames_per_second(), s.second,
+ _film->scaler(), _video_frames_out, _film->frames_per_second(), s.second,
_film->colour_lut(), _film->j2k_bandwidth(),
_film->log()
)
));
_worker_condition.notify_all ();
- _last_real_frame = _video_frame;
+ _last_real_frame = _video_frames_out;
}
- ++_video_frame;
+ ++_video_frames_in;
+ ++_video_frames_out;
+
+ if (dfr.repeat) {
+ _links_required.push_back (make_pair (_video_frames_out - 1, _video_frames_out));
+ ++_video_frames_out;
+ }
}
void
write_audio (data);
- _audio_frame += data->frames ();
+ _audio_frames_in += data->frames ();
}
void
sf_write_float (_sound_files[i], audio->data(i), audio->frames());
}
- _audio_frames_written += audio->frames ();
+ _audio_frames_out += audio->frames ();
}
void
float current_frames_per_second () const;
bool skipping () const;
- SourceFrame video_frame () const;
+ int video_frames_out () const;
private:
bool _just_skipped;
/** Number of video frames received so far */
- SourceFrame _video_frame;
+ SourceFrame _video_frames_in;
/** Number of audio frames received so far */
- int64_t _audio_frame;
+ int64_t _audio_frames_in;
+ /** Number of video frames written for the DCP so far */
+ int _video_frames_out;
+ /** Number of audio frames written for the DCP so far */
+ int64_t _audio_frames_out;
#if HAVE_SWRESAMPLE
SwrContext* _swr_context;
/** List of links that we need to create when all frames have been processed;
* such that we need to call link (first, second) for each member of this list.
* In other words, `first' is a `real' frame and `second' should be a link to `first'.
+ * Frames are DCP frames.
*/
std::list<std::pair<int, int> > _links_required;
std::vector<SNDFILE*> _sound_files;
- int64_t _audio_frames_written;
boost::optional<int> _last_real_frame;
bool _process_end;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
+using libdcp::Size;
FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, DecodeOptions o, Job* j)
: Decoder (f, o, j)
~FFmpegDecoder ();
float frames_per_second () const;
- Size native_size () const;
+ libdcp::Size native_size () const;
SourceFrame length () const;
int time_base_numerator () const;
int time_base_denominator () const;
using boost::ends_with;
using boost::starts_with;
using boost::optional;
+using libdcp::Size;
int const Film::state_version = 2;
, _package_type (o._package_type)
, _size (o._size)
, _length (o._length)
+ , _dcp_intrinsic_duration (o._dcp_intrinsic_duration)
, _content_digest (o._content_digest)
, _content_audio_streams (o._content_audio_streams)
, _external_audio_stream (o._external_audio_stream)
f << "width " << _size.width << "\n";
f << "height " << _size.height << "\n";
f << "length " << _length.get_value_or(0) << "\n";
+ f << "dcp_intrinsic_duration " << _dcp_intrinsic_duration.get_value_or(0) << "\n";
f << "content_digest " << _content_digest << "\n";
for (vector<shared_ptr<AudioStream> >::const_iterator i = _content_audio_streams.begin(); i != _content_audio_streams.end(); ++i) {
if (vv) {
_length = vv;
}
+ } else if (k == "dcp_intrinsic_duration") {
+ int const vv = atoi (v.c_str ());
+ if (vv) {
+ _dcp_intrinsic_duration = vv;
+ }
} else if (k == "content_digest") {
_content_digest = v;
} else if (k == "content_audio_stream" || (!version && k == "audio_stream")) {
return rint (t);
}
-boost::optional<int>
-Film::dcp_length () const
+int
+Film::still_duration_in_frames () const
{
- if (content_type() == STILL) {
- return _still_duration * frames_per_second();
- }
-
- if (!length()) {
- return boost::optional<int> ();
- }
-
- return length().get() - trim_start() - trim_end();
+ return still_duration() * frames_per_second();
}
/** @return a DCI-compliant name for a DCP of this film */
_length = boost::none;
}
signal_changed (LENGTH);
-}
+}
+
+void
+Film::set_dcp_intrinsic_duration (int d)
+{
+ {
+ boost::mutex::scoped_lock lm (_state_mutex);
+ _dcp_intrinsic_duration = d;
+ }
+ signal_changed (DCP_INTRINSIC_DURATION);
+}
void
Film::set_content_digest (string d)
return _external_audio_stream;
}
-/** @param f Source frame index.
+/** @param f DCP frame index.
* @param t true to return a temporary file path, otherwise a permanent one.
* @return The path to write this video frame to.
*/
string
-Film::frame_out_path (SourceFrame f, bool t) const
+Film::frame_out_path (int f, bool t) const
{
stringstream s;
s << j2k_dir() << "/";
}
string
-Film::hash_out_path (SourceFrame f, bool t) const
+Film::hash_out_path (int f, bool t) const
{
return frame_out_path (f, t) + ".md5";
}
std::string file (std::string f) const;
std::string dir (std::string d) const;
- std::string frame_out_path (SourceFrame f, bool t) const;
- std::string hash_out_path (SourceFrame f, bool t) const;
+ std::string frame_out_path (int f, bool t) const;
+ std::string hash_out_path (int f, bool t) const;
std::string multichannel_audio_out_path (int c, bool t) const;
std::string content_path () const;
void write_metadata () const;
void read_metadata ();
- Size cropped_size (Size) const;
- boost::optional<int> dcp_length () const;
+ libdcp::Size cropped_size (libdcp::Size) const;
std::string dci_name () const;
std::string dcp_name () const;
+ boost::optional<int> dcp_intrinsic_duration () const {
+ boost::mutex::scoped_lock lm (_state_mutex);
+ return _dcp_intrinsic_duration;
+ }
+
/** @return true if our state has changed since we last saved it */
bool dirty () const {
return _dirty;
DCI_METADATA,
SIZE,
LENGTH,
+ DCP_INTRINSIC_DURATION,
CONTENT_AUDIO_STREAMS,
SUBTITLE_STREAMS,
FRAMES_PER_SECOND,
return _scaler;
}
- SourceFrame trim_start () const {
+ int trim_start () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _trim_start;
}
- SourceFrame trim_end () const {
+ int trim_end () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _trim_end;
}
return _still_duration;
}
+ int still_duration_in_frames () const;
+
boost::shared_ptr<SubtitleStream> subtitle_stream () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _subtitle_stream;
return _package_type;
}
- Size size () const {
+ libdcp::Size size () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _size;
}
void set_studio (std::string);
void set_facility (std::string);
void set_package_type (std::string);
- void set_size (Size);
+ void set_size (libdcp::Size);
void set_length (SourceFrame);
void unset_length ();
+ void set_dcp_intrinsic_duration (int);
void set_content_digest (std::string);
void set_content_audio_streams (std::vector<boost::shared_ptr<AudioStream> >);
void set_subtitle_streams (std::vector<boost::shared_ptr<SubtitleStream> >);
/* Data which are cached to speed things up */
/** Size, in pixels, of the source (ignoring cropping) */
- Size _size;
+ libdcp::Size _size;
/** The length of the source, in video frames (as far as we know) */
boost::optional<SourceFrame> _length;
+ boost::optional<int> _dcp_intrinsic_duration;
/** MD5 digest of our content file */
std::string _content_digest;
/** The audio streams in our content */
using std::string;
using std::list;
using boost::shared_ptr;
+using libdcp::Size;
/** Construct a FilterGraph for the settings in a film.
* @param film Film.
class FilterGraph
{
public:
- FilterGraph (boost::shared_ptr<Film> film, FFmpegDecoder* decoder, Size s, AVPixelFormat p);
+ FilterGraph (boost::shared_ptr<Film> film, FFmpegDecoder* decoder, libdcp::Size s, AVPixelFormat p);
- bool can_process (Size s, AVPixelFormat p) const;
+ bool can_process (libdcp::Size s, AVPixelFormat p) const;
std::list<boost::shared_ptr<Image> > process (AVFrame const * frame);
private:
AVFilterContext* _buffer_src_context;
AVFilterContext* _buffer_sink_context;
- Size _size; ///< size of the images that this chain can process
+ libdcp::Size _size; ///< size of the images that this chain can process
AVPixelFormat _pixel_format; ///< pixel format of the images that this chain can process
};
using std::stringstream;
using std::vector;
using boost::shared_ptr;
+using libdcp::Size;
vector<Format const *> Format::_formats;
class Format
{
public:
- Format (Size dcp, std::string id, std::string n, std::string d)
+ Format (libdcp::Size dcp, std::string id, std::string n, std::string d)
: _dcp_size (dcp)
, _id (id)
, _nickname (n)
* put in a DCP for this ratio. This size will not correspond
* to the ratio when we are doing things like 16:9 in a Flat frame.
*/
- Size dcp_size () const {
+ libdcp::Size dcp_size () const {
return _dcp_size;
}
* put in a DCP for this ratio. This size will not correspond
* to the ratio when we are doing things like 16:9 in a Flat frame.
*/
- Size _dcp_size;
+ libdcp::Size _dcp_size;
/** id for use in metadata */
std::string _id;
/** nickname (e.g. Flat, Scope) */
class FixedFormat : public Format
{
public:
- FixedFormat (int, Size, std::string, std::string, std::string);
+ FixedFormat (int, libdcp::Size, std::string, std::string, std::string);
int ratio_as_integer (boost::shared_ptr<const Film>) const {
return _ratio;
class VariableFormat : public Format
{
public:
- VariableFormat (Size, std::string, std::string, std::string);
+ VariableFormat (libdcp::Size, std::string, std::string, std::string);
int ratio_as_integer (boost::shared_ptr<const Film> f) const;
float ratio_as_float (boost::shared_ptr<const Film> f) const;
using namespace std;
using namespace boost;
+using libdcp::Size;
void
Image::swap (Image& other)
virtual int * stride () const = 0;
/** @return Size of the image, in pixels */
- virtual Size size () const = 0;
+ virtual libdcp::Size size () const = 0;
int components () const;
int lines (int) const;
- boost::shared_ptr<Image> scale_and_convert_to_rgb (Size out_size, int padding, Scaler const * scaler, bool aligned) const;
- boost::shared_ptr<Image> scale (Size, Scaler const *, bool aligned) const;
+ boost::shared_ptr<Image> scale_and_convert_to_rgb (libdcp::Size out_size, int padding, Scaler const * scaler, bool aligned) const;
+ boost::shared_ptr<Image> scale (libdcp::Size, Scaler const *, bool aligned) const;
boost::shared_ptr<Image> post_process (std::string, bool aligned) const;
void alpha_blend (boost::shared_ptr<const Image> image, Position pos);
boost::shared_ptr<Image> crop (Crop c, bool aligned) const;
uint8_t ** data () const;
int * line_size () const;
int * stride () const;
- Size size () const;
+ libdcp::Size size () const;
private:
/* Not allowed */
class SimpleImage : public Image
{
public:
- SimpleImage (AVPixelFormat, Size, bool);
+ SimpleImage (AVPixelFormat, libdcp::Size, bool);
SimpleImage (SimpleImage const &);
SimpleImage& operator= (SimpleImage const &);
~SimpleImage ();
uint8_t ** data () const;
int * line_size () const;
int * stride () const;
- Size size () const;
+ libdcp::Size size () const;
protected:
void allocate ();
void swap (SimpleImage &);
private:
- Size _size; ///< size in pixels
+ libdcp::Size _size; ///< size in pixels
uint8_t** _data; ///< array of pointers to components
int* _line_size; ///< array of sizes of the data in each line, in pixels (without any alignment padding bytes)
int* _stride; ///< array of strides for each line (including any alignment padding bytes)
using std::cout;
using boost::shared_ptr;
+using libdcp::Size;
ImageMagickDecoder::ImageMagickDecoder (
boost::shared_ptr<Film> f, DecodeOptions o, Job* j)
ImageMagickDecoder::pass ()
{
if (_iter == _files.end()) {
- if (!_film->dcp_length() || video_frame() >= _film->dcp_length().get()) {
+ if (video_frame() >= _film->still_duration_in_frames()) {
return true;
}
return 0;
}
- Size native_size () const;
+ libdcp::Size native_size () const;
SourceFrame length () const {
/* We don't know */
string
MakeDCPJob::j2c_path (int f, int offset) const
{
- DCPFrameRate dfr (_film->frames_per_second());
- int const mult = dfr.skip ? 2 : 1;
- SourceFrame const s = ((f + offset) * mult) + _film->trim_start();
- return _film->frame_out_path (s, false);
+ return _film->frame_out_path (f, false);
}
string
void
MakeDCPJob::run ()
{
- if (!_film->dcp_length()) {
- throw EncodeError ("cannot make a DCP when the source length is not known");
+ if (!_film->dcp_intrinsic_duration()) {
+ throw EncodeError ("cannot make a DCP when its intrinsic duration is not known");
}
descend (0.9);
/* Remove any old DCP */
boost::filesystem::remove_all (dcp_path);
+ int const frames = _film->dcp_intrinsic_duration().get();
+ int const duration = frames - _film->trim_start() - _film->trim_end();
DCPFrameRate const dfr (_film->frames_per_second ());
- int frames = 0;
- switch (_film->content_type ()) {
- case VIDEO:
- /* Source frames -> DCP frames */
- frames = _film->dcp_length().get();
- if (dfr.skip) {
- frames /= 2;
- }
- break;
- case STILL:
- frames = _film->still_duration() * 24;
- break;
- }
-
libdcp::DCP dcp (_film->dir (_film->dcp_name()));
dcp.Progress.connect (boost::bind (&MakeDCPJob::dcp_progress, this, _1));
&dcp.Progress,
dfr.frames_per_second,
this_time,
- _film->format()->dcp_size().width,
- _film->format()->dcp_size().height
+ _film->format()->dcp_size()
)
);
pa->set_entry_point (_film->trim_start ());
- pa->set_duration (_film->duration ());
+ pa->set_duration (duration);
ascend ();
);
sa->set_entry_point (_film->trim_start ());
- sa->set_duration (_film->duration ());
+ sa->set_duration (duration);
ascend ();
}
int _video_frames;
int64_t _audio_frames;
boost::optional<AVPixelFormat> _pixel_format;
- boost::optional<Size> _size;
+ boost::optional<libdcp::Size> _size;
boost::optional<int> _channels;
};
using boost::algorithm::split;
using boost::thread;
using boost::bind;
+using libdcp::Size;
/** Create a server description from a string of metadata returned from as_metadata().
* @param v Metadata.
using namespace std;
using namespace boost;
+using libdcp::Size;
/** Construct a TimedSubtitle. This is a subtitle image, position,
* and a range of time over which it should be shown.
set_progress (1);
set_state (FINISHED_OK);
+ _film->set_dcp_intrinsic_duration (_encoder->video_frames_out ());
+
_film->log()->log ("Transcode job completed successfully");
+ _film->log()->log (String::compose ("DCP intrinsic duration is %1", _encoder->video_frames_out()));
} catch (std::exception& e) {
return 0;
}
- if (!_film->dcp_length()) {
+ if (!_film->length()) {
return 0;
}
+ /* Compute approximate proposed length here, as it's only here that we need it */
+ int length = _film->length().get();
+ DCPFrameRate const dfr (_film->frames_per_second ());
+ if (dfr.skip) {
+ length /= 2;
+ }
+ /* If we are repeating it shouldn't affect transcode time, so don't take it into account */
+
- /* We assume that dcp_length() is valid, if it is set */
+ /* We assume that length() is valid, if it is set */
- SourceFrame const left = _film->trim_start() + _film->dcp_length().get() - _encoder->video_frame();
+ int const left = length - _encoder->video_frames_out();
return left / fps;
}
using namespace std;
using namespace boost;
+using libdcp::Size;
thread::id ui_thread;
}
-bool operator== (Size const & a, Size const & b)
-{
- return (a.width == b.width && a.height == b.height);
-}
-
-bool operator!= (Size const & a, Size const & b)
-{
- return !(a == b);
-}
-
bool operator== (Crop const & a, Crop const & b)
{
return (a.left == b.left && a.right == b.right && a.top == b.top && a.bottom == b.bottom);
#include <vector>
#include <boost/shared_ptr.hpp>
#include <boost/asio.hpp>
+#include <libdcp/util.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
VIDEO ///< content is a video
};
-/** @class Size
- * @brief Representation of the size of something */
-struct Size
-{
- /** Construct a zero Size */
- Size ()
- : width (0)
- , height (0)
- {}
-
- /** @param w Width.
- * @param h Height.
- */
- Size (int w, int h)
- : width (w)
- , height (h)
- {}
-
- /** width */
- int width;
- /** height */
- int height;
-};
-
-extern bool operator== (Size const & a, Size const & b);
-extern bool operator!= (Size const & a, Size const & b);
-
/** @struct Crop
* @brief A description of the crop of an image or video.
*/
return Position (x, y);
}
- Size size () const {
- return Size (width, height);
+ libdcp::Size size () const {
+ return libdcp::Size (width, height);
}
Rect intersection (Rect const & other) const;
};
-extern std::string crop_string (Position, Size);
+extern std::string crop_string (Position, libdcp::Size);
extern int dcp_audio_sample_rate (int);
extern int dcp_audio_channels (int);
extern std::string colour_lut_index_to_name (int index);
/** @return video frames per second, or 0 if unknown */
virtual float frames_per_second () const = 0;
/** @return native size in pixels */
- virtual Size native_size () const = 0;
+ virtual libdcp::Size native_size () const = 0;
/** @return length (in source video frames), according to our content's header */
virtual SourceFrame length () const = 0;
_trim_end->SetRange (0, _film->length().get());
}
break;
+ case Film::DCP_INTRINSIC_DURATION:
+ break;
case Film::DCP_CONTENT_TYPE:
checked_set (_dcp_content_type, DCPContentType::as_index (_film->dcp_content_type ()));
_dcp_name->SetLabel (std_to_wx (_film->dcp_name ()));
using std::cout;
using std::list;
using boost::shared_ptr;
+using libdcp::Size;
FilmViewer::FilmViewer (shared_ptr<Film> f, wxWindow* p)
: wxPanel (p)
return "";
}
- if (_film->dcp_length()) {
+ if (_film->length()) {
/* XXX: encoded_frames() should check which frames have been encoded */
- u << " (" << ((_film->encoded_frames() - _film->trim_start()) * 100 / _film->dcp_length().get()) << "%)";
+ u << " (" << (_film->encoded_frames() * 100 / _film->length().get()) << "%)";
}
return u.str ();
}
width 0
height 0
length 0
+dcp_intrinsic_duration 0
content_digest
external_audio_stream external 0 0
frames_per_second 0
BOOST_AUTO_TEST_CASE (client_server_test)
{
- shared_ptr<Image> image (new SimpleImage (PIX_FMT_RGB24, Size (1998, 1080), false));
+ shared_ptr<Image> image (new SimpleImage (PIX_FMT_RGB24, libdcp::Size (1998, 1080), false));
uint8_t* p = image->data()[0];
for (int y = 0; y < 1080; ++y) {
}
}
- shared_ptr<Image> sub_image (new SimpleImage (PIX_FMT_RGBA, Size (100, 200), false));
+ shared_ptr<Image> sub_image (new SimpleImage (PIX_FMT_RGBA, libdcp::Size (100, 200), false));
p = sub_image->data()[0];
for (int y = 0; y < 200; ++y) {
for (int x = 0; x < 100; ++x) {
new DCPVideoFrame (
image,
subtitle,
- Size (1998, 1080),
+ libdcp::Size (1998, 1080),
0,
0,
1,
BOOST_AUTO_TEST_CASE (compact_image_test)
{
- SimpleImage* s = new SimpleImage (PIX_FMT_RGB24, Size (50, 50), false);
+ SimpleImage* s = new SimpleImage (PIX_FMT_RGB24, libdcp::Size (50, 50), false);
BOOST_CHECK_EQUAL (s->components(), 1);
BOOST_CHECK_EQUAL (s->stride()[0], 50 * 3);
BOOST_CHECK_EQUAL (s->line_size()[0], 50 * 3);
BOOST_CHECK (t->stride()[0] == s->stride()[0]);
/* assignment operator */
- SimpleImage* u = new SimpleImage (PIX_FMT_YUV422P, Size (150, 150), true);
+ SimpleImage* u = new SimpleImage (PIX_FMT_YUV422P, libdcp::Size (150, 150), true);
*u = *s;
BOOST_CHECK_EQUAL (u->components(), 1);
BOOST_CHECK_EQUAL (u->stride()[0], 50 * 3);
BOOST_AUTO_TEST_CASE (aligned_image_test)
{
- SimpleImage* s = new SimpleImage (PIX_FMT_RGB24, Size (50, 50), true);
+ SimpleImage* s = new SimpleImage (PIX_FMT_RGB24, libdcp::Size (50, 50), true);
BOOST_CHECK_EQUAL (s->components(), 1);
/* 160 is 150 aligned to the nearest 32 bytes */
BOOST_CHECK_EQUAL (s->stride()[0], 160);
BOOST_CHECK (t->stride()[0] == s->stride()[0]);
/* assignment operator */
- SimpleImage* u = new SimpleImage (PIX_FMT_YUV422P, Size (150, 150), false);
+ SimpleImage* u = new SimpleImage (PIX_FMT_YUV422P, libdcp::Size (150, 150), false);
*u = *s;
BOOST_CHECK_EQUAL (u->components(), 1);
BOOST_CHECK_EQUAL (u->stride()[0], 160);