Previously we asked libdcp whether an imported J2K file was
RGB or XYZ. The answer it gives is sometimes wrong, for reasons
that are not clear (either the files are not marked correctly,
or openjpeg is not parsing the relevant metadata correctly).
However it seems that, in general, we use the user's specified
colour conversion to decide what to do with an image, rather than
asking the image what should be done to it.
Hence it makes more sense to assume that if a user specifies no
colour conversion for a J2K file then the file is XYZ.
With preview, the colour conversion from XYZ back to RGB is done
by FFmpeg, so we have to set the pixel format correctly on the
Image that comes back from J2KImageProxy. Now we get that pixel
format from the configured colourspace conversion rather than
from openjpeg's guess as to the file's colourspace.
It's a bit ugly that the only thing we ask the file about is whether
or not it is in YUV (which governs whether or not FFmpeg applies
the user's configured YUV-to-RGB conversion). Everything else is
decided by the configured conversion.
I think there's still some ugliness in here that I can't put my
finger on.
+2016-06-29 Carl Hetherington <cth@carlh.net>
+
+ * Obey specified colour conversion when previewing
+ RGB and XYZ JPEG2000 files.
+
2016-06-24 Carl Hetherington <cth@carlh.net>
* Version 2.8.14 released.
2016-06-24 Carl Hetherington <cth@carlh.net>
* Version 2.8.14 released.
shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
if (_mono_reader) {
shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
if (_mono_reader) {
- video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size())), _offset + frame);
+ video->give (
+ shared_ptr<ImageProxy> (
+ new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size(), AV_PIX_FMT_XYZ12LE)
+ ),
+ _offset + frame
+ );
- shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT)),
+ shared_ptr<ImageProxy> (
+ new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)),
_offset + frame
);
video->give (
_offset + frame
);
video->give (
- shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT)),
+ shared_ptr<ImageProxy> (
+ new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)),
/* Either we need an image or we are using moving images, so load one */
boost::filesystem::path path = _image_content->path (_image_content->still() ? 0 : _video_position);
if (valid_j2k_file (path)) {
/* Either we need an image or we are using moving images, so load one */
boost::filesystem::path path = _image_content->path (_image_content->still() ? 0 : _video_position);
if (valid_j2k_file (path)) {
+ AVPixelFormat pf;
+ if (_image_content->video->colour_conversion()) {
+ /* We have a specified colour conversion: assume the image is RGB */
+ pf = AV_PIX_FMT_RGB48LE;
+ } else {
+ /* No specified colour conversion: assume the image is XYZ */
+ pf = AV_PIX_FMT_XYZ12LE;
+ }
/* We can't extract image size from a JPEG2000 codestream without decoding it,
so pass in the image content's size here.
*/
/* We can't extract image size from a JPEG2000 codestream without decoding it,
so pass in the image content's size here.
*/
- _image.reset (new J2KImageProxy (path, _image_content->video->size ()));
+ _image.reset (new J2KImageProxy (path, _image_content->video->size(), pf));
} else {
_image.reset (new MagickImageProxy (path));
}
} else {
_image.reset (new MagickImageProxy (path));
}
using dcp::Data;
/** Construct a J2KImageProxy from a JPEG2000 file */
using dcp::Data;
/** Construct a J2KImageProxy from a JPEG2000 file */
-J2KImageProxy::J2KImageProxy (boost::filesystem::path path, dcp::Size size)
+J2KImageProxy::J2KImageProxy (boost::filesystem::path path, dcp::Size size, AVPixelFormat pixel_format)
: _data (path)
, _size (size)
: _data (path)
, _size (size)
+ , _pixel_format (pixel_format)
-J2KImageProxy::J2KImageProxy (shared_ptr<const dcp::MonoPictureFrame> frame, dcp::Size size)
+J2KImageProxy::J2KImageProxy (shared_ptr<const dcp::MonoPictureFrame> frame, dcp::Size size, AVPixelFormat pixel_format)
: _data (frame->j2k_size ())
, _size (size)
: _data (frame->j2k_size ())
, _size (size)
+ , _pixel_format (pixel_format)
{
memcpy (_data.data().get(), frame->j2k_data(), _data.size ());
}
{
memcpy (_data.data().get(), frame->j2k_data(), _data.size ());
}
-J2KImageProxy::J2KImageProxy (shared_ptr<const dcp::StereoPictureFrame> frame, dcp::Size size, dcp::Eye eye)
+J2KImageProxy::J2KImageProxy (shared_ptr<const dcp::StereoPictureFrame> frame, dcp::Size size, dcp::Eye eye, AVPixelFormat pixel_format)
: _size (size)
, _eye (eye)
: _size (size)
, _eye (eye)
+ , _pixel_format (pixel_format)
{
switch (eye) {
case dcp::EYE_LEFT:
{
switch (eye) {
case dcp::EYE_LEFT:
_eye = static_cast<dcp::Eye> (xml->number_child<int> ("Eye"));
}
_data = Data (xml->number_child<int> ("Size"));
_eye = static_cast<dcp::Eye> (xml->number_child<int> ("Eye"));
}
_data = Data (xml->number_child<int> ("Size"));
+ /* This only matters when we are using J2KImageProxy for the preview, which
+ will never use this constructor (which is only used for passing data to
+ encode servers). So we can put anything in here. It's a bit of a hack.
+ */
+ _pixel_format = AV_PIX_FMT_XYZ12LE;
socket->read (_data.data().get (), _data.size ());
}
socket->read (_data.data().get (), _data.size ());
}
- shared_ptr<Image> image (new Image (pixel_format(), _size, true));
+ shared_ptr<Image> image (new Image (_pixel_format, _size, true));
/* Copy data in whatever format (sRGB or XYZ) into our Image; I'm assuming
the data is 12-bit either way.
/* Copy data in whatever format (sRGB or XYZ) into our Image; I'm assuming
the data is 12-bit either way.
return memcmp (_data.data().get(), jp->_data.data().get(), _data.size()) == 0;
}
return memcmp (_data.data().get(), jp->_data.data().get(), _data.size()) == 0;
}
-AVPixelFormat
-J2KImageProxy::pixel_format () const
-{
- ensure_j2k ();
-
- if (_j2k->srgb ()) {
- return AV_PIX_FMT_RGB48LE;
- }
-
- return AV_PIX_FMT_XYZ12LE;
-}
-
-J2KImageProxy::J2KImageProxy (Data data, dcp::Size size)
+J2KImageProxy::J2KImageProxy (Data data, dcp::Size size, AVPixelFormat pixel_format)
: _data (data)
, _size (size)
: _data (data)
, _size (size)
+ , _pixel_format (pixel_format)
class J2KImageProxy : public ImageProxy
{
public:
class J2KImageProxy : public ImageProxy
{
public:
- J2KImageProxy (boost::filesystem::path path, dcp::Size);
- J2KImageProxy (boost::shared_ptr<const dcp::MonoPictureFrame> frame, dcp::Size);
- J2KImageProxy (boost::shared_ptr<const dcp::StereoPictureFrame> frame, dcp::Size, dcp::Eye);
+ J2KImageProxy (boost::filesystem::path path, dcp::Size, AVPixelFormat pixel_format);
+ J2KImageProxy (boost::shared_ptr<const dcp::MonoPictureFrame> frame, dcp::Size, AVPixelFormat pixel_format);
+ J2KImageProxy (boost::shared_ptr<const dcp::StereoPictureFrame> frame, dcp::Size, dcp::Eye, AVPixelFormat pixel_format);
J2KImageProxy (boost::shared_ptr<cxml::Node> xml, boost::shared_ptr<Socket> socket);
boost::shared_ptr<Image> image (boost::optional<dcp::NoteHandler> note = boost::optional<dcp::NoteHandler> ()) const;
J2KImageProxy (boost::shared_ptr<cxml::Node> xml, boost::shared_ptr<Socket> socket);
boost::shared_ptr<Image> image (boost::optional<dcp::NoteHandler> note = boost::optional<dcp::NoteHandler> ()) const;
void send_binary (boost::shared_ptr<Socket>) const;
/** @return true if our image is definitely the same as another, false if it is probably not */
bool same (boost::shared_ptr<const ImageProxy>) const;
void send_binary (boost::shared_ptr<Socket>) const;
/** @return true if our image is definitely the same as another, false if it is probably not */
bool same (boost::shared_ptr<const ImageProxy>) const;
- AVPixelFormat pixel_format () const;
+ AVPixelFormat pixel_format () const {
+ return _pixel_format;
+ }
dcp::Data j2k () const {
return _data;
dcp::Data j2k () const {
return _data;
friend struct client_server_test_j2k;
/* For tests */
friend struct client_server_test_j2k;
/* For tests */
- J2KImageProxy (dcp::Data data, dcp::Size size);
+ J2KImageProxy (dcp::Data data, dcp::Size size, AVPixelFormat pixel_format);
void ensure_j2k () const;
dcp::Data _data;
dcp::Size _size;
boost::optional<dcp::Eye> _eye;
mutable boost::shared_ptr<dcp::OpenJPEGImage> _j2k;
void ensure_j2k () const;
dcp::Data _data;
dcp::Size _size;
boost::optional<dcp::Eye> _eye;
mutable boost::shared_ptr<dcp::OpenJPEGImage> _j2k;
+ AVPixelFormat _pixel_format;
property == SubtitleContentProperty::COLOUR ||
property == SubtitleContentProperty::OUTLINE ||
property == SubtitleContentProperty::OUTLINE_COLOUR ||
property == SubtitleContentProperty::COLOUR ||
property == SubtitleContentProperty::OUTLINE ||
property == SubtitleContentProperty::OUTLINE_COLOUR ||
- property == FFmpegContentProperty::SUBTITLE_STREAM
+ property == FFmpegContentProperty::SUBTITLE_STREAM ||
+ property == VideoContentProperty::COLOUR_CONVERSION
) {
_have_valid_pieces = false;
) {
_have_valid_pieces = false;
property == VideoContentProperty::CROP ||
property == VideoContentProperty::SCALE ||
property == VideoContentProperty::FADE_IN ||
property == VideoContentProperty::CROP ||
property == VideoContentProperty::SCALE ||
property == VideoContentProperty::FADE_IN ||
- property == VideoContentProperty::FADE_OUT ||
- property == VideoContentProperty::COLOUR_CONVERSION
+ property == VideoContentProperty::FADE_OUT
- video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_mono_reader->get_frame(frame), _size)), frame);
+ video->give (
+ shared_ptr<ImageProxy> (new J2KImageProxy (_mono_reader->get_frame(frame), _size, AV_PIX_FMT_XYZ12LE)), frame
+ );
- video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_LEFT)), frame);
- video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_RIGHT)), frame);
+ video->give (
+ shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)), frame
+ );
+ video->give (
+ shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)), frame
+ );
}
_next += ContentTime::from_frames (1, vfr);
}
_next += ContentTime::from_frames (1, vfr);
+ /* In an ideal world, what we would do here is:
+ *
+ * 1. convert to XYZ exactly as we do in the DCP creation path.
+ * 2. convert back to RGB for the preview display, compensating
+ * for the monitor etc. etc.
+ *
+ * but this is inefficient if the source is RGB. Since we don't
+ * (currently) care too much about the precise accuracy of the preview's
+ * colour mapping (and we care more about its speed) we try to short-
+ * circuit this "ideal" situation in some cases.
+ *
+ * The content's specified colour conversion indicates the colourspace
+ * which the content is in (according to the user).
+ *
+ * PlayerVideo::image (bound to PlayerVideo::always_rgb) will take the source
+ * image and convert it (from whatever the user has said it is) to RGB.
+ */
+
- bind (&Log::dcp_log, _film->log().get(), _1, _2), bind (&PlayerVideo::always_rgb, _1), false, true
+ bind (&Log::dcp_log, _film->log().get(), _1, _2),
+ bind (&PlayerVideo::always_rgb, _1),
+ false, true
shared_ptr<PlayerVideo> j2k_pvf (
new PlayerVideo (
shared_ptr<PlayerVideo> j2k_pvf (
new PlayerVideo (
- shared_ptr<ImageProxy> (new J2KImageProxy (raw_locally_encoded, dcp::Size (1998, 1080))),
+ shared_ptr<ImageProxy> (new J2KImageProxy (raw_locally_encoded, dcp::Size (1998, 1080), AV_PIX_FMT_XYZ12LE)),
DCPTime (),
Crop (),
optional<double> (),
DCPTime (),
Crop (),
optional<double> (),