+2014-11-10 Carl Hetherington <cth@carlh.net>
+
+ * Guess initial scale from the size of video
+ content images, taking pixel aspect ratio into
+ account where possible.
+
2014-11-07 c.hetherington <cth@carlh.net>
* Add a hint if there is 3D content in a proposed 2D DCP.
, _cinema_sound_processor (CinemaSoundProcessor::from_id (N_("dolby_cp750")))
, _allow_any_dcp_frame_rate (false)
, _default_still_length (10)
- , _default_scale (VideoContentScale (Ratio::from_id ("185")))
, _default_container (Ratio::from_id ("185"))
, _default_dcp_content_type (DCPContentType::from_isdcf_name ("FTR"))
, _default_j2k_bandwidth (100000000)
_language = f.optional_string_child ("Language");
- c = f.optional_string_child ("DefaultScale");
- if (c) {
- _default_scale = VideoContentScale::from_id (c.get ());
- }
-
c = f.optional_string_child ("DefaultContainer");
if (c) {
_default_container = Ratio::from_id (c.get ());
if (_language) {
root->add_child("Language")->add_child_text (_language.get());
}
- root->add_child("DefaultScale")->add_child_text (_default_scale.id ());
if (_default_container) {
root->add_child("DefaultContainer")->add_child_text (_default_container->id ());
}
return _default_still_length;
}
- VideoContentScale default_scale () const {
- return _default_scale;
- }
-
Ratio const * default_container () const {
return _default_container;
}
changed ();
}
- void set_default_scale (VideoContentScale s) {
- _default_scale = s;
- changed ();
- }
-
void set_default_container (Ratio const * c) {
_default_container = c;
changed ();
ISDCFMetadata _default_isdcf_metadata;
boost::optional<std::string> _language;
int _default_still_length;
- VideoContentScale _default_scale;
Ratio const * _default_container;
DCPContentType const * _default_dcp_content_type;
std::string _dcp_issuer;
return ContentTime (max (ContentTime::Type (1), length.get ()));
}
+/** @return Sample (pixel) aspect ratio of the video stream as guessed by FFmpeg,
+ *  or an empty optional if FFmpeg does not know it.
+ */
+optional<float>
+FFmpegExaminer::sample_aspect_ratio () const
+{
+	AVRational sar = av_guess_sample_aspect_ratio (_format_context, _format_context->streams[_video_stream], 0);
+	if (sar.num == 0 || sar.den == 0) {
+		/* {0, 1} means "unknown" to FFmpeg; returning 0 here would wrongly
+		   squash the image (and a zero denominator would divide by zero),
+		   so report "no information" instead.
+		*/
+		return optional<float> ();
+	}
+	return float (sar.num) / sar.den;
+}
+
string
FFmpegExaminer::audio_stream_name (AVStream* s) const
{
float video_frame_rate () const;
dcp::Size video_size () const;
ContentTime video_length () const;
+ boost::optional<float> sample_aspect_ratio () const;
std::vector<boost::shared_ptr<FFmpegSubtitleStream> > subtitle_streams () const {
return _subtitle_streams;
return *j;
}
+/** @param r An aspect ratio (e.g. computed from the size of some video content).
+ *  @return The member of _ratios whose ratio() is closest to r, or 0 if _ratios is empty.
+ */
+Ratio const *
+Ratio::nearest_from_ratio (float r)
+{
+	Ratio const * nearest = 0;
+	float distance = FLT_MAX;
+
+	/* Linear scan: _ratios is a small fixed table, so this is cheap */
+	for (vector<Ratio const *>::iterator i = _ratios.begin (); i != _ratios.end(); ++i) {
+		float const d = fabs ((*i)->ratio() - r);
+		if (d < distance) {
+			distance = d;
+			nearest = *i;
+		}
+	}
+
+	return nearest;
+}
static void setup_ratios ();
static Ratio const * from_id (std::string i);
static Ratio const * from_ratio (float r);
+ static Ratio const * nearest_from_ratio (float r);
static std::vector<Ratio const *> all () {
return _ratios;
}
, _video_length (0)
, _video_frame_rate (0)
, _video_frame_type (VIDEO_FRAME_TYPE_2D)
- , _scale (Config::instance()->default_scale ())
+ , _scale (VideoContentScale (Ratio::from_id ("178")))
{
set_default_colour_conversion (false);
}
, _video_length (len)
, _video_frame_rate (0)
, _video_frame_type (VIDEO_FRAME_TYPE_2D)
- , _scale (Config::instance()->default_scale ())
+ , _scale (VideoContentScale (Ratio::from_id ("178")))
{
set_default_colour_conversion (false);
}
, _video_length (0)
, _video_frame_rate (0)
, _video_frame_type (VIDEO_FRAME_TYPE_2D)
- , _scale (Config::instance()->default_scale ())
+ , _scale (VideoContentScale (Ratio::from_id ("178")))
{
set_default_colour_conversion (false);
}
}
_video_frame_type = static_cast<VideoFrameType> (node->number_child<int> ("VideoFrameType"));
+ _sample_aspect_ratio = node->optional_number_child<float> ("SampleAspectRatio");
_crop.left = node->number_child<int> ("LeftCrop");
_crop.right = node->number_child<int> ("RightCrop");
_crop.top = node->number_child<int> ("TopCrop");
node->add_child("VideoHeight")->add_child_text (raw_convert<string> (_video_size.height));
node->add_child("VideoFrameRate")->add_child_text (raw_convert<string> (_video_frame_rate));
node->add_child("VideoFrameType")->add_child_text (raw_convert<string> (static_cast<int> (_video_frame_type)));
+ if (_sample_aspect_ratio) {
+ node->add_child("SampleAspectRatio")->add_child_text (raw_convert<string> (_sample_aspect_ratio.get ()));
+ }
_crop.as_xml (node);
_scale.as_xml (node->add_child("Scale"));
if (_colour_conversion) {
dcp::Size const vs = d->video_size ();
float const vfr = d->video_frame_rate ();
ContentTime vl = d->video_length ();
+ optional<float> const ar = d->sample_aspect_ratio ();
{
boost::mutex::scoped_lock lm (_mutex);
_video_size = vs;
_video_frame_rate = vfr;
_video_length = vl;
+ _sample_aspect_ratio = ar;
+
+ /* Guess correct scale from size and sample aspect ratio */
+ _scale = VideoContentScale (
+ Ratio::nearest_from_ratio (float (_video_size.width) * ar.get_value_or (1) / _video_size.height)
+ );
}
shared_ptr<const Film> film = _film.lock ();
signal_changed (VideoContentProperty::VIDEO_SIZE);
signal_changed (VideoContentProperty::VIDEO_FRAME_RATE);
+ signal_changed (VideoContentProperty::VIDEO_SCALE);
signal_changed (ContentProperty::LENGTH);
}
video_size().height,
setprecision (3), video_size().ratio ()
);
+
+ if (sample_aspect_ratio ()) {
+ s << String::compose (_(" sample aspect ratio %1:1"), sample_aspect_ratio().get ());
+ }
return s.str ();
}
string
VideoContent::technical_summary () const
{
- return String::compose (
+ string s = String::compose (
"video: length %1, size %2x%3, rate %4",
video_length_after_3d_combine().seconds(),
video_size().width,
video_size().height,
video_frame_rate()
);
+
+ if (sample_aspect_ratio ()) {
+ s += String::compose (_(", sample aspect ratio %1"), (sample_aspect_ratio().get ()));
+ }
+
+ return s;
}
dcp::Size
return _colour_conversion;
}
+ boost::optional<float> sample_aspect_ratio () const {
+ boost::mutex::scoped_lock lm (_mutex);
+ return _sample_aspect_ratio;
+ }
+
ContentTime fade_in () const {
boost::mutex::scoped_lock lm (_mutex);
return _fade_in;
Crop _crop;
VideoContentScale _scale;
boost::optional<ColourConversion> _colour_conversion;
+ /** Sample aspect ratio obtained from the content file's header,
+ if there is one.
+ */
+ boost::optional<float> _sample_aspect_ratio;
ContentTime _fade_in;
ContentTime _fade_out;
};
#include "video_decoder.h"
#include "image.h"
#include "image_proxy.h"
+#include "raw_image_proxy.h"
#include "content_video.h"
#include "i18n.h"
using std::cout;
using std::list;
using std::max;
+using std::back_inserter;
using boost::shared_ptr;
using boost::optional;
#endif
, _same (false)
{
-
+ _black_image.reset (new Image (PIX_FMT_RGB24, _video_content->video_size(), true));
+ _black_image->make_black ();
}
list<ContentVideo>
}
}
- /* Clean up _decoded_video; keep the frame we are returning, but nothing before that */
+ /* Clean up _decoded_video; keep the frame we are returning (which may have two images
+ for 3D), but nothing before that */
while (!_decoded_video.empty() && _decoded_video.front().frame < dec.front().frame) {
_decoded_video.pop_front ();
}
return dec;
}
+/** Fill _decoded_video up to, but not including, the specified frame,
+ *  repeating the last decoded image (or black if nothing has been decoded yet).
+ */
+void
+VideoDecoder::fill_up_to_2d (VideoFrame frame)
+{
+	if (frame == 0) {
+		/* Already OK */
+		return;
+	}
-/** Called by subclasses when they have a video frame ready */
+	/* Fill with black... */
+	boost::shared_ptr<const ImageProxy> filler_image (new RawImageProxy (_black_image));
+	Part filler_part = PART_WHOLE;
+
+	/* ...unless there's some video we can fill with */
+	if (!_decoded_video.empty ()) {
+		filler_image = _decoded_video.back().image;
+		filler_part = _decoded_video.back().part;
+	}
+
+	/* First frame to fill: one past whatever we already have, or 0 if we have nothing */
+	VideoFrame filler_frame = _decoded_video.empty() ? 0 : (_decoded_video.back().frame + 1);
+	while (filler_frame < frame) {
+
+#ifdef DCPOMATIC_DEBUG
+		test_gaps++;
+#endif
+
+		_decoded_video.push_back (
+			ContentVideo (filler_image, EYES_BOTH, filler_part, filler_frame)
+		);
+
+		++filler_frame;
+	}
+}
+
+/** Fill _decoded_video up to, but not including, the specified frame and eye,
+ *  repeating the last decoded image for each eye (or black if an eye has no
+ *  decoded image yet).
+ */
 void
-VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
+VideoDecoder::fill_up_to_3d (VideoFrame frame, Eyes eye)
 {
-	/* We may receive the same frame index twice for 3D, and we need to know
-	   when that happens.
-	*/
-	_same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
+	if (frame == 0 && eye == EYES_LEFT) {
+		/* Already OK */
+		return;
+	}
+
+	/* Images to fill each eye with; left null here so the search below can
+	   tell whether it has found anything (initialising them to black would
+	   make the !filler_*_image tests always false).
+	*/
+	boost::shared_ptr<const ImageProxy> filler_left_image;
+	boost::shared_ptr<const ImageProxy> filler_right_image;
+	Part filler_left_part = PART_WHOLE;
+	Part filler_right_part = PART_WHOLE;
+
+	/* Search backwards for the most recent decoded image for each eye */
+	for (list<ContentVideo>::const_reverse_iterator i = _decoded_video.rbegin(); i != _decoded_video.rend(); ++i) {
+		if (i->eyes == EYES_LEFT && !filler_left_image) {
+			filler_left_image = i->image;
+			filler_left_part = i->part;
+		} else if (i->eyes == EYES_RIGHT && !filler_right_image) {
+			filler_right_image = i->image;
+			filler_right_part = i->part;
+		}
+
+		if (filler_left_image && filler_right_image) {
+			break;
+		}
+	}
+
+	/* Fall back to black for any eye we found no image for */
+	if (!filler_left_image) {
+		filler_left_image.reset (new RawImageProxy (_black_image));
+	}
+	if (!filler_right_image) {
+		filler_right_image.reset (new RawImageProxy (_black_image));
+	}
+
+	/* Work out where filling starts: just after whatever we already have */
+	VideoFrame filler_frame = 0;
+	Eyes filler_eye = EYES_LEFT;
+
+	if (!_decoded_video.empty ()) {
+		if (_decoded_video.back().eyes == EYES_LEFT) {
+			/* We have the left eye of the last frame; start from its right eye */
+			filler_frame = _decoded_video.back().frame;
+			filler_eye = EYES_RIGHT;
+		} else {
+			/* The last frame is complete; start from the next frame's left eye */
+			filler_frame = _decoded_video.back().frame + 1;
+			filler_eye = EYES_LEFT;
+		}
+	}
-	/* Fill in gaps */
-	/* XXX: 3D */
+	while (filler_frame != frame || filler_eye != eye) {
-	while (!_decoded_video.empty () && (_decoded_video.back().frame + 1) < frame) {
 #ifdef DCPOMATIC_DEBUG
 		test_gaps++;
 #endif
+
 		_decoded_video.push_back (
 			ContentVideo (
-				_decoded_video.back().image,
-				_decoded_video.back().eyes,
-				_decoded_video.back().part,
-				_decoded_video.back().frame + 1
+				filler_eye == EYES_LEFT ? filler_left_image : filler_right_image,
+				filler_eye,
+				filler_eye == EYES_LEFT ? filler_left_part : filler_right_part,
+				filler_frame
 			)
 		);
+
+		/* Advance eye-by-eye: L -> R within a frame, then on to the next frame */
+		if (filler_eye == EYES_LEFT) {
+			filler_eye = EYES_RIGHT;
+		} else {
+			filler_eye = EYES_LEFT;
+			++filler_frame;
+		}
 	}
+}
+/** Called by subclasses when they have a video frame ready.
+ *  Fills in any gap between the end of _decoded_video and the new frame
+ *  before pushing it, so that _decoded_video never contains gaps.
+ */
+void
+VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
+{
+	/* We may receive the same frame index twice for 3D, and we need to know
+	   when that happens.
+	*/
+	_same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
+
+	/* Work out what we are going to push into _decoded_video next */
+	list<ContentVideo> to_push;
 	switch (_video_content->video_frame_type ()) {
 	case VIDEO_FRAME_TYPE_2D:
-		_decoded_video.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
+		to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+		/* A repeated frame index means this is the right-eye image of the pair */
-		_decoded_video.push_back (ContentVideo (image, _same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
+		to_push.push_back (ContentVideo (image, _same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-		_decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
-		_decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
+		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
+		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-		_decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
-		_decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
+		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
+		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_LEFT:
-		_decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
+		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		_decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
+		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
 		break;
 	default:
 		assert (false);
 	}
+
+	/* Now VideoDecoder is required never to have gaps in the frames that it presents
+	   via get_video(). Hence we need to fill in any gap between the last thing in _decoded_video
+	   and the things we are about to push.
+	*/
+
+	if (_video_content->video_frame_type() == VIDEO_FRAME_TYPE_2D) {
+		fill_up_to_2d (to_push.front().frame);
+	} else {
+		fill_up_to_3d (to_push.front().frame, to_push.front().eyes);
+	}
+
+	copy (to_push.begin(), to_push.end(), back_inserter (_decoded_video));
 }
void
class VideoContent;
class ImageProxy;
+class Image;
/** @class VideoDecoder
* @brief Parent for classes which decode video.
#endif
protected:
+ friend struct video_decoder_fill_test1;
+ friend struct video_decoder_fill_test2;
void seek (ContentTime time, bool accurate);
void video (boost::shared_ptr<const ImageProxy>, VideoFrame frame);
std::list<ContentVideo> decoded_video (VideoFrame frame);
+ void fill_up_to_2d (VideoFrame);
+ void fill_up_to_3d (VideoFrame, Eyes);
boost::shared_ptr<const VideoContent> _video_content;
std::list<ContentVideo> _decoded_video;
bool _same;
+ boost::shared_ptr<Image> _black_image;
};
#endif
virtual float video_frame_rate () const = 0;
virtual dcp::Size video_size () const = 0;
virtual ContentTime video_length () const = 0;
+ virtual boost::optional<float> sample_aspect_ratio () const {
+ return boost::optional<float> ();
+ }
};
_isdcf_metadata_button = new wxButton (panel, wxID_ANY, _("Edit..."));
table->Add (_isdcf_metadata_button);
- add_label_to_sizer (table, panel, _("Default scale to"), true);
- _scale = new wxChoice (panel, wxID_ANY);
- table->Add (_scale);
-
add_label_to_sizer (table, panel, _("Default container"), true);
_container = new wxChoice (panel, wxID_ANY);
table->Add (_container);
_isdcf_metadata_button->Bind (wxEVT_COMMAND_BUTTON_CLICKED, boost::bind (&DefaultsPage::edit_isdcf_metadata_clicked, this, parent));
- vector<VideoContentScale> scales = VideoContentScale::all ();
- for (size_t i = 0; i < scales.size(); ++i) {
- _scale->Append (std_to_wx (scales[i].name ()));
- if (scales[i] == config->default_scale ()) {
- _scale->SetSelection (i);
- }
- }
-
vector<Ratio const *> ratios = Ratio::all ();
for (size_t i = 0; i < ratios.size(); ++i) {
_container->Append (std_to_wx (ratios[i]->nickname ()));
}
}
- _scale->Bind (wxEVT_COMMAND_CHOICE_SELECTED, boost::bind (&DefaultsPage::scale_changed, this));
_container->Bind (wxEVT_COMMAND_CHOICE_SELECTED, boost::bind (&DefaultsPage::container_changed, this));
vector<DCPContentType const *> const ct = DCPContentType::all ();
Config::instance()->set_default_still_length (_still_length->GetValue ());
}
- void scale_changed ()
- {
- vector<VideoContentScale> scale = VideoContentScale::all ();
- Config::instance()->set_default_scale (scale[_scale->GetSelection()]);
- }
-
void container_changed ()
{
vector<Ratio const *> ratio = Ratio::all ();
#else
wxDirPickerCtrl* _directory;
#endif
- wxChoice* _scale;
wxChoice* _container;
wxChoice* _dcp_content_type;
wxTextCtrl* _issuer;
threed_test.cc
upmixer_a_test.cc
util_test.cc
+ video_decoder_fill_test.cc
xml_subtitle_test.cc
"""