* @param l Log to write to.
*/
DCPVideoFrame::DCPVideoFrame (
- shared_ptr<const Image> image, int f, int dcp_fps, int bw, shared_ptr<Log> l
+ shared_ptr<const Image> image, int f, Eyes eyes, int dcp_fps, int bw, shared_ptr<Log> l
)
: _image (image)
, _frame (f)
+ , _eyes (eyes)
, _frames_per_second (dcp_fps)
, _j2k_bandwidth (bw)
, _log (l)
);
/* Set the max image and component sizes based on frame_rate */
- int const max_cs_len = ((float) _j2k_bandwidth) / 8 / _frames_per_second;
+ int max_cs_len = ((float) _j2k_bandwidth) / 8 / _frames_per_second;
+ if (_eyes == EYES_LEFT || _eyes == EYES_RIGHT) {
+ /* In 3D we have only half the normal bandwidth per eye */
+ max_cs_len /= 2;
+ }
int const max_comp_size = max_cs_len / 1.25;
/* get a J2K compressor handle */
socket->connect (*endpoint_iterator);
stringstream s;
- s << N_("encode please\n")
- << N_("width ") << _image->size().width << N_("\n")
- << N_("height ") << _image->size().height << N_("\n")
- << N_("frame ") << _frame << N_("\n")
- << N_("frames_per_second ") << _frames_per_second << N_("\n")
- << N_("j2k_bandwidth ") << _j2k_bandwidth << N_("\n");
+ s << "encode please\n"
+ << "width " << _image->size().width << "\n"
+ << "height " << _image->size().height << "\n"
+ << "eyes " << static_cast<int> (_eyes) << "\n"
+ << "frame " << _frame << "\n"
+ << "frames_per_second " << _frames_per_second << "\n"
+ << "j2k_bandwidth " << _j2k_bandwidth << "\n";
_log->log (String::compose (
N_("Sending to remote; pixel format %1, components %2, lines (%3,%4,%5), line sizes (%6,%7,%8)"),
* @param frame DCP frame index.
*/
/* Write this encoded J2K frame to disk for the given frame index and eye.
 * Writes to a .tmp path first, then renames, so a partially-written file is
 * never seen under the final name.
 * NOTE(review): the fopen() result is not checked for NULL before fwrite/
 * fclose — confirm disk-full / permission failures are handled by a caller.
 */
void
-EncodedData::write (shared_ptr<const Film> film, int frame) const
+EncodedData::write (shared_ptr<const Film> film, int frame, Eyes eyes) const
{
- string const tmp_j2c = film->j2c_path (frame, true);
+ string const tmp_j2c = film->j2c_path (frame, eyes, true);
FILE* f = fopen (tmp_j2c.c_str (), N_("wb"));
fwrite (_data, 1, _size, f);
fclose (f);
- string const real_j2c = film->j2c_path (frame, false);
+ string const real_j2c = film->j2c_path (frame, eyes, false);
/* Rename the file from foo.j2c.tmp to foo.j2c now that it is complete */
boost::filesystem::rename (tmp_j2c, real_j2c);
}
/* Write the libdcp::FrameInfo (offset/size/hash) sidecar for this frame/eye,
 * used later to verify and resume an existing picture MXF.
 */
void
-EncodedData::write_info (shared_ptr<const Film> film, int frame, libdcp::FrameInfo fin) const
+EncodedData::write_info (shared_ptr<const Film> film, int frame, Eyes eyes, libdcp::FrameInfo fin) const
{
- string const info = film->info_path (frame);
+ string const info = film->info_path (frame, eyes);
ofstream h (info.c_str());
fin.write (h);
}
virtual ~EncodedData ();
void send (boost::shared_ptr<Socket> socket);
- void write (boost::shared_ptr<const Film>, int) const;
- void write_info (boost::shared_ptr<const Film>, int, libdcp::FrameInfo) const;
+ void write (boost::shared_ptr<const Film>, int, Eyes) const;
+ void write_info (boost::shared_ptr<const Film>, int, Eyes, libdcp::FrameInfo) const;
/** @return data */
uint8_t* data () const {
class DCPVideoFrame : public boost::noncopyable
{
public:
- DCPVideoFrame (boost::shared_ptr<const Image>, int, int, int, boost::shared_ptr<Log>);
+ DCPVideoFrame (boost::shared_ptr<const Image>, int, Eyes, int, int, boost::shared_ptr<Log>);
boost::shared_ptr<EncodedData> encode_locally ();
boost::shared_ptr<EncodedData> encode_remotely (ServerDescription const *);
+ Eyes eyes () const {
+ return _eyes;
+ }
+
int frame () const {
return _frame;
}
private:
boost::shared_ptr<const Image> _image;
int _frame; ///< frame index within the DCP's intrinsic duration
+ Eyes _eyes;
int _frames_per_second; ///< Frames per second that we will use for the DCP
int _j2k_bandwidth; ///< J2K bandwidth to use
: _film (f)
, _job (j)
, _video_frames_out (0)
- , _have_a_real_frame (false)
, _terminate (false)
{
-
+ _have_a_real_frame[EYES_BOTH] = false;
+ _have_a_real_frame[EYES_LEFT] = false;
+ _have_a_real_frame[EYES_RIGHT] = false;
}
Encoder::~Encoder ()
for (list<shared_ptr<DCPVideoFrame> >::iterator i = _queue.begin(); i != _queue.end(); ++i) {
_film->log()->log (String::compose (N_("Encode left-over frame %1"), (*i)->frame ()));
try {
- _writer->write ((*i)->encode_locally(), (*i)->frame ());
+ _writer->write ((*i)->encode_locally(), (*i)->frame (), (*i)->eyes ());
frame_done ();
} catch (std::exception& e) {
_film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
}
void
-Encoder::process_video (shared_ptr<const Image> image, bool same)
+Encoder::process_video (shared_ptr<const Image> image, Eyes eyes, bool same)
{
boost::mutex::scoped_lock lock (_mutex);
}
if (_writer->can_fake_write (_video_frames_out)) {
- _writer->fake_write (_video_frames_out);
- _have_a_real_frame = false;
+ _writer->fake_write (_video_frames_out, eyes);
+ _have_a_real_frame[eyes] = false;
frame_done ();
- } else if (same && _have_a_real_frame) {
+ } else if (same && _have_a_real_frame[eyes]) {
/* Use the last frame that we encoded. */
- _writer->repeat (_video_frames_out);
+ _writer->repeat (_video_frames_out, eyes);
frame_done ();
} else {
/* Queue this new frame for encoding */
TIMING ("adding to queue of %1", _queue.size ());
_queue.push_back (shared_ptr<DCPVideoFrame> (
new DCPVideoFrame (
- image, _video_frames_out, _film->dcp_video_frame_rate(),
+ image, _video_frames_out, eyes, _film->dcp_video_frame_rate(),
_film->j2k_bandwidth(), _film->log()
)
));
_condition.notify_all ();
- _have_a_real_frame = true;
+ _have_a_real_frame[eyes] = true;
}
++_video_frames_out;
}
if (encoded) {
- _writer->write (encoded, vf->frame ());
+ _writer->write (encoded, vf->frame (), vf->eyes ());
frame_done ();
} else {
lock.lock ();
* @param i Video frame image.
* @param same true if i is the same as the last time we were called.
*/
- void process_video (boost::shared_ptr<const Image> i, bool same);
+ void process_video (boost::shared_ptr<const Image> i, Eyes eyes, bool same);
/** Call with some audio data */
void process_audio (boost::shared_ptr<const AudioBuffers>);
/** Number of video frames written for the DCP so far */
int _video_frames_out;
- bool _have_a_real_frame;
+ bool _have_a_real_frame[EYES_COUNT];
bool _terminate;
std::list<boost::shared_ptr<DCPVideoFrame> > _queue;
std::list<boost::thread *> _threads;
FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio)
: Decoder (f)
- , VideoDecoder (f)
+ , VideoDecoder (f, c)
, AudioDecoder (f)
, SubtitleDecoder (f)
, FFmpeg (c)
, _dci_metadata (Config::instance()->default_dci_metadata ())
, _dcp_video_frame_rate (24)
, _dcp_audio_channels (MAX_AUDIO_CHANNELS)
+ , _dcp_3d (false)
, _sequence_video (true)
, _dirty (false)
{
<< "_" << scaler()->id()
<< "_" << j2k_bandwidth();
+ if (_dcp_3d) {
+ s << "_3D";
+ }
+
return s.str ();
}
root->add_child("DCPVideoFrameRate")->add_child_text (lexical_cast<string> (_dcp_video_frame_rate));
root->add_child("DCIDate")->add_child_text (boost::gregorian::to_iso_string (_dci_date));
root->add_child("DCPAudioChannels")->add_child_text (lexical_cast<string> (_dcp_audio_channels));
+ root->add_child("DCP3D")->add_child_text (_dcp_3d ? "1" : "0");
root->add_child("SequenceVideo")->add_child_text (_sequence_video ? "1" : "0");
_playlist->as_xml (root->add_child ("Playlist"));
d << "_" << dcp_content_type()->dci_name();
}
+ if (dcp_3d ()) {
+ d << "-3D";
+ }
+
+ if (dcp_video_frame_rate() != 24) {
+ d << "-" << dcp_video_frame_rate();
+ }
+
if (container()) {
d << "_" << container()->dci_name();
}
signal_changed (DCP_AUDIO_CHANNELS);
}
+/** Set whether the DCP should be written in 3D (two eyes per frame).
+ *  @param t true to write a 3D DCP, false for 2D.
+ *  Emits the DCP_3D Changed signal after the state lock is released,
+ *  so observers can safely call back into Film.
+ */
+void
+Film::set_dcp_3d (bool t)
+{
+ {
+ boost::mutex::scoped_lock lm (_state_mutex);
+ _dcp_3d = t;
+ }
+ signal_changed (DCP_3D);
+}
+
void
Film::signal_changed (Property p)
{
}
string
-Film::info_path (int f) const
+Film::info_path (int f, Eyes e) const
{
boost::filesystem::path p;
p /= info_dir ();
stringstream s;
s.width (8);
- s << setfill('0') << f << ".md5";
+ s << setfill('0') << f;
+
+ if (e == EYES_LEFT) {
+ s << ".L";
+ } else if (e == EYES_RIGHT) {
+ s << ".R";
+ }
+ s << ".md5";
+
p /= s.str();
/* info_dir() will already have added any initial bit of the path,
}
string
-Film::j2c_path (int f, bool t) const
+Film::j2c_path (int f, Eyes e, bool t) const
{
boost::filesystem::path p;
p /= "j2c";
stringstream s;
s.width (8);
- s << setfill('0') << f << ".j2c";
+ s << setfill('0') << f;
+
+ if (e == EYES_LEFT) {
+ s << ".L";
+ } else if (e == EYES_RIGHT) {
+ s << ".R";
+ }
+
+ s << ".j2c";
if (t) {
s << ".tmp";
Film (std::string d);
std::string info_dir () const;
- std::string j2c_path (int f, bool t) const;
- std::string info_path (int f) const;
+ std::string j2c_path (int, Eyes, bool) const;
+ std::string info_path (int, Eyes) const;
std::string internal_video_mxf_dir () const;
std::string internal_video_mxf_filename () const;
boost::filesystem::path audio_analysis_path (boost::shared_ptr<const AudioContent>) const;
DCI_METADATA,
DCP_VIDEO_FRAME_RATE,
DCP_AUDIO_CHANNELS,
- SEQUENCE_VIDEO
+ /** The setting of _dcp_3d has been changed */
+ DCP_3D,
+ SEQUENCE_VIDEO,
};
return _dcp_audio_channels;
}
+ bool dcp_3d () const {
+ boost::mutex::scoped_lock lm (_state_mutex);
+ return _dcp_3d;
+ }
+
bool sequence_video () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _sequence_video;
void set_dci_metadata (DCIMetadata);
void set_dcp_video_frame_rate (int);
void set_dcp_audio_channels (int);
+ void set_dcp_3d (bool);
void set_dci_date_today ();
void set_sequence_video (bool);
/** The date that we should use in a DCI name */
boost::gregorian::date _dci_date;
int _dcp_audio_channels;
+ /** If true, the DCP will be written in 3D mode; otherwise in 2D.
+ This will be regardless of what content is on the playlist.
+ */
+ bool _dcp_3d;
bool _sequence_video;
/** true if our state has changed since we last saved it */
}
void
-Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, bool same, VideoContent::Frame frame)
+Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
{
shared_ptr<Piece> piece = weak_piece.lock ();
if (!piece) {
_last_video = piece->content;
#endif
- Video (work_image, same, time);
+ Video (work_image, eyes, same, time);
time += TIME_HZ / _film->dcp_video_frame_rate();
if (frc.repeat) {
- Video (work_image, true, time);
+ Video (work_image, eyes, true, time);
time += TIME_HZ / _film->dcp_video_frame_rate();
}
if (fc) {
shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
- fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3));
+ fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
fd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
fd->Subtitle.connect (bind (&Player::process_subtitle, this, piece, _1, _2, _3, _4));
if (!id) {
id.reset (new StillImageDecoder (_film, ic));
- id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3));
+ id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
}
piece->decoder = id;
} else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
update_subtitle ();
Changed (frequent);
+ } else if (property == VideoContentProperty::VIDEO_FRAME_TYPE) {
+ /* A 2D/3D frame-type change alters how video is emitted, so signal a change.
+    (Removed leftover debug output "vft change." — this patch deliberately
+    strips other stray couts, so this one should not be committed either.)
+ */
+ Changed (frequent);
}
}
#endif
/* XXX: use same here */
- Video (_black_frame, false, _video_position);
+ Video (_black_frame, EYES_BOTH, false, _video_position);
_video_position += _film->video_frames_to_time (1);
}
/** Emitted when a video frame is ready.
* First parameter is the video image.
- * Second parameter is true if the image is the same as the last one that was emitted.
- * Third parameter is the time.
+ * Second parameter is the eye(s) that should see this image.
+ * Third parameter is true if the image is the same as the last one that was emitted.
+ * Fourth parameter is the time.
*/
- boost::signals2::signal<void (boost::shared_ptr<const Image>, bool, Time)> Video;
+ boost::signals2::signal<void (boost::shared_ptr<const Image>, Eyes, bool, Time)> Video;
/** Emitted when some audio data is ready */
boost::signals2::signal<void (boost::shared_ptr<const AudioBuffers>, Time)> Audio;
private:
friend class PlayerWrapper;
- void process_video (boost::weak_ptr<Piece>, boost::shared_ptr<const Image>, bool, VideoContent::Frame);
+ void process_video (boost::weak_ptr<Piece>, boost::shared_ptr<const Image>, Eyes, bool, VideoContent::Frame);
void process_audio (boost::weak_ptr<Piece>, boost::shared_ptr<const AudioBuffers>, AudioContent::Frame);
void process_subtitle (boost::weak_ptr<Piece>, boost::shared_ptr<Image>, dcpomatic::Rect<double>, Time, Time);
void setup_pieces ();
int frame = get_required_int (kv, "frame");
int frames_per_second = get_required_int (kv, "frames_per_second");
int j2k_bandwidth = get_required_int (kv, "j2k_bandwidth");
+ Eyes eyes = static_cast<Eyes> (get_required_int (kv, "eyes"));
shared_ptr<Image> image (new Image (PIX_FMT_RGB24, size, true));
image->read_from_socket (socket);
DCPVideoFrame dcp_video_frame (
- image, frame, frames_per_second, j2k_bandwidth, _log
+ image, frame, eyes, frames_per_second, j2k_bandwidth, _log
);
shared_ptr<EncodedData> encoded = dcp_video_frame.encode_locally ();
StillImageDecoder::StillImageDecoder (shared_ptr<const Film> f, shared_ptr<const StillImageContent> c)
: Decoder (f)
- , VideoDecoder (f)
+ , VideoDecoder (f, c)
, StillImage (c)
{
using boost::dynamic_pointer_cast;
/* Forward a Player video frame to the Encoder, if the Encoder still exists.
 * The weak_ptr avoids keeping the Encoder alive from the signal connection.
 */
static void
-video_proxy (weak_ptr<Encoder> encoder, shared_ptr<const Image> image, bool same)
+video_proxy (weak_ptr<Encoder> encoder, shared_ptr<const Image> image, Eyes eyes, bool same)
{
shared_ptr<Encoder> e = encoder.lock ();
if (e) {
- e->process_video (image, same);
+ e->process_video (image, eyes, same);
}
}
, _player (f->make_player ())
, _encoder (new Encoder (f, j))
{
- _player->Video.connect (bind (video_proxy, _encoder, _1, _2));
+ _player->Video.connect (bind (video_proxy, _encoder, _1, _2, _3));
_player->Audio.connect (bind (audio_proxy, _encoder, _1));
}
VIDEO_FRAME_TYPE_3D_LEFT_RIGHT
};
+enum Eyes
+{
+ EYES_BOTH,
+ EYES_LEFT,
+ EYES_RIGHT,
+ EYES_COUNT
+};
+
/** @struct Crop
* @brief A description of the crop of an image or video.
*/
struct Crop
{
Crop () : left (0), right (0), top (0), bottom (0) {}
+ Crop (int l, int r, int t, int b) : left (l), right (r), top (t), bottom (b) {}
/** Number of pixels to remove from the left-hand side */
int left;
using std::string;
using std::stringstream;
using std::setprecision;
+using std::cout;
using boost::shared_ptr;
using boost::lexical_cast;
using boost::optional;
{
{
boost::mutex::scoped_lock lm (_mutex);
- _video_frame_rate = t;
+ _video_frame_type = t;
}
signal_changed (VideoContentProperty::VIDEO_FRAME_TYPE);
using std::cout;
using boost::shared_ptr;
-VideoDecoder::VideoDecoder (shared_ptr<const Film> f)
+VideoDecoder::VideoDecoder (shared_ptr<const Film> f, shared_ptr<const VideoContent> c)
: Decoder (f)
+ , _video_content (c)
, _video_position (0)
{
/* Emit a decoded video frame, splitting side-by-side 3D sources into
 * separate left/right-eye images.
 */
void
VideoDecoder::video (shared_ptr<const Image> image, bool same, VideoContent::Frame frame)
{
- Video (image, same, frame);
+ switch (_video_content->video_frame_type ()) {
+ case VIDEO_FRAME_TYPE_2D:
+ Video (image, EYES_BOTH, same, frame);
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ {
+ /* Crop is (left, right, top, bottom): cropping `half' off the right
+    keeps the left-eye half, and vice versa.
+    NOTE(review): an odd source width loses/gains a column between eyes
+    via integer division — confirm sources are guaranteed even-width. */
+ int const half = image->size().width / 2;
+ Video (image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, same, frame);
+ Video (image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, same, frame);
+ break;
+ }
+ }
+
_video_position = frame + 1;
}
class VideoDecoder : public virtual Decoder
{
public:
- VideoDecoder (boost::shared_ptr<const Film>);
+ VideoDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const VideoContent>);
/** Seek so that the next pass() will yield (approximately) the requested frame.
* Pass accurate = true to try harder to get close to the request.
/** Emitted when a video frame is ready.
* First parameter is the video image.
- * Second parameter is true if the image is the same as the last one that was emitted.
- * Third parameter is the frame within our source.
+ * Second parameter is the eye(s) which should see this image.
+ * Third parameter is true if the image is the same as the last one that was emitted for this Eyes value.
+ * Fourth parameter is the frame within our source.
*/
- boost::signals2::signal<void (boost::shared_ptr<const Image>, bool, VideoContent::Frame)> Video;
+ boost::signals2::signal<void (boost::shared_ptr<const Image>, Eyes, bool, VideoContent::Frame)> Video;
protected:
void video (boost::shared_ptr<const Image>, bool, VideoContent::Frame);
+ boost::shared_ptr<const VideoContent> _video_content;
VideoContent::Frame _video_position;
};
}
void
-Writer::write (shared_ptr<const EncodedData> encoded, int frame)
+Writer::write (shared_ptr<const EncodedData> encoded, int frame, Eyes eyes)
{
boost::mutex::scoped_lock lock (_mutex);
qi.type = QueueItem::FULL;
qi.encoded = encoded;
qi.frame = frame;
+ qi.eyes = eyes;
_queue.push_back (qi);
++_queued_full_in_memory;
}
void
-Writer::fake_write (int frame)
+Writer::fake_write (int frame, Eyes eyes)
{
boost::mutex::scoped_lock lock (_mutex);
- ifstream ifi (_film->info_path (frame).c_str());
+ ifstream ifi (_film->info_path (frame, eyes).c_str());
libdcp::FrameInfo info (ifi);
QueueItem qi;
qi.type = QueueItem::FAKE;
qi.size = info.size;
qi.frame = frame;
+ qi.eyes = eyes;
_queue.push_back (qi);
_condition.notify_all ();
_sound_asset_writer->write (audio->data(), audio->frames());
}
+/** This must be called from Writer::thread() with an appropriate lock held.
+ *  @return true if the frame at the head of the queue is the next one that
+ *  should be written to the MXF, taking eye ordering into account (in 3D,
+ *  LEFT must follow RIGHT and RIGHT must follow LEFT).
+ *  NOTE(review): _last_written_eyes has no initializer visible in this patch;
+ *  confirm the constructor sets it (e.g. to EYES_RIGHT) so the very first
+ *  LEFT frame qualifies.
+ *  NOTE(review): for the RIGHT eye of a 3D pair, frame == _last_written_frame + 1
+ *  only holds if _last_written_frame is advanced per-eye — confirm the update
+ *  logic in thread().
+ */
+bool
+Writer::have_sequenced_image_at_queue_head () const
+{
+ if (_queue.empty ()) {
+ return false;
+ }
+
+ /* We assume that we will get either all 2D frames or all 3D frames, not a mixture */
+
+ bool const eyes_ok = (_queue.front().eyes == EYES_BOTH) ||
+ (_queue.front().eyes == EYES_LEFT && _last_written_eyes == EYES_RIGHT) ||
+ (_queue.front().eyes == EYES_RIGHT && _last_written_eyes == EYES_LEFT);
+
+ return _queue.front().frame == (_last_written_frame + 1) && eyes_ok;
+}
+
void
Writer::thread ()
try
_queue.sort ();
- if (_finish ||
- _queued_full_in_memory > _maximum_frames_in_memory ||
- (!_queue.empty() && _queue.front().frame == (_last_written_frame + 1))) {
-
+ if (_finish || _queued_full_in_memory > _maximum_frames_in_memory || have_sequenced_image_at_queue_head ()) {
break;
}
}
/* Write any frames that we can write; i.e. those that are in sequence */
- while (!_queue.empty() && _queue.front().frame == (_last_written_frame + 1)) {
+ while (have_sequenced_image_at_queue_head ()) {
QueueItem qi = _queue.front ();
_queue.pop_front ();
if (qi.type == QueueItem::FULL && qi.encoded) {
{
_film->log()->log (String::compose (N_("Writer FULL-writes %1 to MXF"), qi.frame));
if (!qi.encoded) {
- qi.encoded.reset (new EncodedData (_film->j2c_path (qi.frame, false)));
+ qi.encoded.reset (new EncodedData (_film->j2c_path (qi.frame, qi.eyes, false)));
}
libdcp::FrameInfo const fin = _picture_asset_writer->write (qi.encoded->data(), qi.encoded->size());
- qi.encoded->write_info (_film, qi.frame, fin);
+ qi.encoded->write_info (_film, qi.frame, qi.eyes, fin);
_last_written = qi.encoded;
++_full_written;
break;
{
_film->log()->log (String::compose (N_("Writer REPEAT-writes %1 to MXF"), qi.frame));
libdcp::FrameInfo const fin = _picture_asset_writer->write (_last_written->data(), _last_written->size());
- _last_written->write_info (_film, qi.frame, fin);
+ _last_written->write_info (_film, qi.frame, qi.eyes, fin);
++_repeat_written;
break;
}
lock.unlock ();
_film->log()->log (String::compose (N_("Writer full (awaiting %1); pushes %2 to disk"), _last_written_frame + 1, qi.frame));
- qi.encoded->write (_film, qi.frame);
+ qi.encoded->write (_film, qi.frame, qi.eyes);
lock.lock ();
qi.encoded.reset ();
--_queued_full_in_memory;
/** Tell the writer that frame `f' should be a repeat of the frame before it */
/* Queues a REPEAT item (no encoded data) and wakes the writer thread. */
void
-Writer::repeat (int f)
+Writer::repeat (int f, Eyes e)
{
boost::mutex::scoped_lock lock (_mutex);
QueueItem qi;
qi.type = QueueItem::REPEAT;
qi.frame = f;
+ qi.eyes = e;
_queue.push_back (qi);
_condition.notify_all ();
}
+/** Check one frame of an existing picture MXF against its stored info file.
+ *  @param mxf Open MXF file handle (read mode).
+ *  @param f Frame index to check.
+ *  @param eyes Eye (BOTH for 2D; LEFT or RIGHT for 3D).
+ *  @return true if the frame's data is complete and its MD5 matches the
+ *  hash recorded when it was originally written.
+ */
+bool
+Writer::check_existing_picture_mxf_frame (FILE* mxf, int f, Eyes eyes)
+{
+ /* Read the frame info as written */
+ ifstream ifi (_film->info_path (f, eyes).c_str());
+ libdcp::FrameInfo info (ifi);
+
+ /* Read the data from the MXF and hash it */
+ fseek (mxf, info.offset, SEEK_SET);
+ EncodedData data (info.size);
+ size_t const read = fread (data.data(), 1, data.size(), mxf);
+ if (read != static_cast<size_t> (data.size ())) {
+ _film->log()->log (String::compose ("Existing frame %1 is incomplete", f));
+ return false;
+ }
+
+ string const existing_hash = md5_digest (data.data(), data.size());
+ if (existing_hash != info.hash) {
+ _film->log()->log (String::compose ("Existing frame %1 failed hash check", f));
+ return false;
+ }
+
+ return true;
+}
void
Writer::check_existing_picture_mxf ()
while (1) {
- /* Read the frame info as written */
- ifstream ifi (_film->info_path (_first_nonexistant_frame).c_str());
- libdcp::FrameInfo info (ifi);
-
- /* Read the data from the MXF and hash it */
- fseek (mxf, info.offset, SEEK_SET);
- EncodedData data (info.size);
- size_t const read = fread (data.data(), 1, data.size(), mxf);
- if (read != static_cast<size_t> (data.size ())) {
- _film->log()->log (String::compose ("Existing frame %1 is incomplete", _first_nonexistant_frame));
- break;
- }
-
- string const existing_hash = md5_digest (data.data(), data.size());
- if (existing_hash != info.hash) {
- _film->log()->log (String::compose ("Existing frame %1 failed hash check", _first_nonexistant_frame));
- break;
+ if (_film->dcp_3d ()) {
+ if (!check_existing_picture_mxf_frame (mxf, _first_nonexistant_frame, EYES_LEFT)) {
+ break;
+ }
+ if (!check_existing_picture_mxf_frame (mxf, _first_nonexistant_frame, EYES_RIGHT)) {
+ break;
+ }
+ } else {
+ if (!check_existing_picture_mxf_frame (mxf, _first_nonexistant_frame, EYES_BOTH)) {
+ break;
+ }
}
_film->log()->log (String::compose ("Have existing frame %1", _first_nonexistant_frame));
/* Order queue items by frame, then LEFT before RIGHT within a frame.
 * NOTE(review): EYES_BOTH compares equivalent to LEFT and RIGHT at the same
 * frame; this is a valid strict weak ordering only if 2D and 3D items are
 * never mixed in one queue, as assumed elsewhere in this patch.
 */
bool
operator< (QueueItem const & a, QueueItem const & b)
{
- return a.frame < b.frame;
+ if (a.frame != b.frame) {
+ return a.frame < b.frame;
+ }
+
+ return a.eyes == EYES_LEFT && b.eyes == EYES_RIGHT;
}
/* Two queue items are equal only if both frame index and eye match. */
bool
operator== (QueueItem const & a, QueueItem const & b)
{
- return a.frame == b.frame;
+ return a.frame == b.frame && a.eyes == b.eyes;
}
#include <boost/thread.hpp>
#include <boost/thread/condition.hpp>
#include "exceptions.h"
+#include "types.h"
class Film;
class EncodedData;
int size;
/** frame index */
int frame;
+ Eyes eyes;
};
bool operator< (QueueItem const & a, QueueItem const & b);
bool can_fake_write (int) const;
- void write (boost::shared_ptr<const EncodedData>, int);
- void fake_write (int);
+ void write (boost::shared_ptr<const EncodedData>, int, Eyes);
+ void fake_write (int, Eyes);
void write (boost::shared_ptr<const AudioBuffers>);
- void repeat (int f);
+ void repeat (int f, Eyes);
void finish ();
private:
void thread ();
void check_existing_picture_mxf ();
+ bool check_existing_picture_mxf_frame (FILE *, int, Eyes);
+ bool have_sequenced_image_at_queue_head () const;
/** our Film */
boost::shared_ptr<const Film> _film;
boost::shared_ptr<const EncodedData> _last_written;
/** the index of the last written frame */
int _last_written_frame;
+ Eyes _last_written_eyes;
/** maximum number of frames to hold in memory, for when we are managing
ordering
*/
}
if (_grid->GetCellValue (ev.GetRow(), ev.GetCol()) == wxT("1")) {
- cout << "set " << ev.GetRow() << " " << ev.GetCol() << " to 0.\n";
_grid->SetCellValue (ev.GetRow(), ev.GetCol(), wxT("0"));
} else {
- cout << "set " << ev.GetRow() << " " << ev.GetCol() << " to 1.\n";
_grid->SetCellValue (ev.GetRow(), ev.GetCol(), wxT("1"));
}
_map = AudioMapping (_map.content_channels ());
- cout << "was: " << _map.dcp_to_content(libdcp::CENTRE).size() << "\n";
for (int i = 0; i < _grid->GetNumberRows(); ++i) {
for (int j = 1; j < _grid->GetNumberCols(); ++j) {
}
}
- cout << "changed: " << _map.dcp_to_content(libdcp::CENTRE).size() << "\n";
Changed (_map);
}
grid->Add (_dcp_audio_channels, wxGBPosition (r, 1));
++r;
+ _dcp_3d = new wxCheckBox (_dcp_panel, wxID_ANY, _("3D"));
+ grid->Add (_dcp_3d, wxGBPosition (r, 0), wxGBSpan (1, 2));
+ ++r;
+
add_label_to_grid_bag_sizer (grid, _dcp_panel, _("Resolution"), true, wxGBPosition (r, 0));
_dcp_resolution = new wxChoice (_dcp_panel, wxID_ANY);
grid->Add (_dcp_resolution, wxGBPosition (r, 1));
_j2k_bandwidth->Connect (wxID_ANY, wxEVT_COMMAND_SPINCTRL_UPDATED, wxCommandEventHandler (FilmEditor::j2k_bandwidth_changed), 0, this);
_dcp_resolution->Connect (wxID_ANY, wxEVT_COMMAND_CHOICE_SELECTED, wxCommandEventHandler (FilmEditor::dcp_resolution_changed), 0, this);
_sequence_video->Connect (wxID_ANY, wxEVT_COMMAND_CHECKBOX_CLICKED, wxCommandEventHandler (FilmEditor::sequence_video_changed), 0, this);
+ _dcp_3d->Bind (wxEVT_COMMAND_CHECKBOX_CLICKED, boost::bind (&FilmEditor::dcp_3d_changed, this));
}
void
case Film::SEQUENCE_VIDEO:
checked_set (_sequence_video, _film->sequence_video ());
break;
+ case Film::DCP_3D:
+ checked_set (_dcp_3d, _film->dcp_3d ());
+ break;
}
}
film_changed (Film::DCP_VIDEO_FRAME_RATE);
film_changed (Film::DCP_AUDIO_CHANNELS);
film_changed (Film::SEQUENCE_VIDEO);
+ film_changed (Film::DCP_3D);
if (!_film->content().empty ()) {
set_selection (_film->content().front ());
_sequence_video->Enable (s);
_dcp_resolution->Enable (s);
_scaler->Enable (s);
+ _dcp_3d->Enable (s);
/* Set the panels in the content notebook */
for (list<FilmEditorPanel*>::iterator i = _panels.begin(); i != _panels.end(); ++i) {
cl.push_back (selected_content ());
_menu.popup (cl, ev.GetPoint ());
}
+
+/** Handler for the 3D checkbox: push its state into the Film, if one is loaded. */
+void
+FilmEditor::dcp_3d_changed ()
+{
+ if (!_film) {
+ return;
+ }
+
+ _film->set_dcp_3d (_dcp_3d->GetValue ());
+}
void dcp_resolution_changed (wxCommandEvent &);
void sequence_video_changed (wxCommandEvent &);
void content_right_click (wxListEvent &);
+ void dcp_3d_changed ();
/* Handle changes to the model */
void film_changed (Film::Property);
wxChoice* _dcp_frame_rate;
wxSpinCtrl* _dcp_audio_channels;
wxButton* _best_dcp_frame_rate;
+ wxCheckBox* _dcp_3d;
wxChoice* _dcp_resolution;
ContentMenu _menu;
_player = f->make_player ();
_player->disable_audio ();
- _player->Video.connect (boost::bind (&FilmViewer::process_video, this, _1, _3));
+ _player->Video.connect (boost::bind (&FilmViewer::process_video, this, _1, _2, _4));
_player->Changed.connect (boost::bind (&FilmViewer::player_changed, this, _1));
calculate_sizes ();
}
void
-FilmViewer::process_video (shared_ptr<const Image> image, Time t)
+FilmViewer::process_video (shared_ptr<const Image> image, Eyes eyes, Time t)
{
+ if (eyes == EYES_RIGHT) {
+ return;
+ }
+
if (_got_frame) {
/* This is an additional frame emitted by a single pass. Store it. */
_queue.push_front (make_pair (image, t));
_got_frame = false;
if (!_queue.empty ()) {
- process_video (_queue.back().first, _queue.back().second);
+ process_video (_queue.back().first, EYES_BOTH, _queue.back().second);
_queue.pop_back ();
} else {
try {
void slider_moved (wxScrollEvent &);
void play_clicked (wxCommandEvent &);
void timer (wxTimerEvent &);
- void process_video (boost::shared_ptr<const Image>, Time);
+ void process_video (boost::shared_ptr<const Image>, Eyes, Time);
void calculate_sizes ();
void check_play_state ();
void fetch_current_frame_again ();
using std::vector;
using std::string;
using std::pair;
+using std::cout;
using boost::shared_ptr;
using boost::dynamic_pointer_cast;
using boost::bind;
_frame_type->Append (_("2D"));
_frame_type->Append (_("3D left/right"));
- _frame_type->Bind (wxEVT_COMMAND_SPINCTRL_UPDATED, bind (&VideoPanel::frame_type_changed, this));
+ _frame_type->Bind (wxEVT_COMMAND_CHOICE_SELECTED, bind (&VideoPanel::frame_type_changed, this));
_left_crop->Connect (wxID_ANY, wxEVT_COMMAND_SPINCTRL_UPDATED, wxCommandEventHandler (VideoPanel::left_crop_changed), 0, this);
_right_crop->Connect (wxID_ANY, wxEVT_COMMAND_SPINCTRL_UPDATED, wxCommandEventHandler (VideoPanel::right_crop_changed), 0, this);
_top_crop->Connect (wxID_ANY, wxEVT_COMMAND_SPINCTRL_UPDATED, wxCommandEventHandler (VideoPanel::top_crop_changed), 0, this);
shared_ptr<FFmpegContent> fc = dynamic_pointer_cast<FFmpegContent> (c);
if (property == VideoContentProperty::VIDEO_FRAME_TYPE) {
- checked_set (_frame_type, vc->video_frame_type ());
+ checked_set (_frame_type, vc ? vc->video_frame_type () : VIDEO_FRAME_TYPE_2D);
} else if (property == VideoContentProperty::VIDEO_CROP) {
checked_set (_left_crop, vc ? vc->crop().left : 0);
checked_set (_right_crop, vc ? vc->crop().right : 0);
new DCPVideoFrame (
image,
0,
+ EYES_BOTH,
24,
200000000,
log