}
void
-ABTranscoder::process_video (shared_ptr<Image> yuv, int frame, int index)
+ABTranscoder::process_video (shared_ptr<Image> yuv, int frame, shared_ptr<Subtitle> sub, int index)
{
if (index == 0) {
/* Keep this image around until we get the other half */
}
/* And pass it to the encoder */
- _encoder->process_video (_image, frame);
+ _encoder->process_video (_image, frame, sub);
_image.reset ();
}
class Options;
class Image;
class Log;
+class Subtitle;
/** @class ABTranscoder
* @brief A transcoder which uses one FilmState for the left half of the screen, and a different one
void go ();
private:
- void process_video (boost::shared_ptr<Image>, int, int);
+ void process_video (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>, int);
boost::shared_ptr<const FilmState> _fs_a;
boost::shared_ptr<const FilmState> _fs_b;
* @param l Log to write to.
*/
DCPVideoFrame::DCPVideoFrame (
- shared_ptr<Image> yuv, Size out, int p, Scaler const * s, int f, float fps, string pp, int clut, int bw, Log* l)
+ shared_ptr<Image> yuv, shared_ptr<Subtitle> sub, Size out, int p, Scaler const * s, int f, float fps, string pp, int clut, int bw, Log* l)
: _input (yuv)
+ , _subtitle (sub)
, _out_size (out)
, _padding (p)
, _scaler (s)
<< Config::instance()->colour_lut_index () << " "
<< Config::instance()->j2k_bandwidth () << " ";
- for (int i = 0; i < _input->components(); ++i) {
- s << _input->line_size()[i] << " ";
- }
-
socket.write ((uint8_t *) s.str().c_str(), s.str().length() + 1, 30);
for (int i = 0; i < _input->components(); ++i) {
class Scaler;
class Image;
class Log;
+class Subtitle;
/** @class EncodedData
* @brief Container for J2K-encoded data.
class DCPVideoFrame
{
public:
- DCPVideoFrame (boost::shared_ptr<Image>, Size, int, Scaler const *, int, float, std::string, int, int, Log *);
+ DCPVideoFrame (boost::shared_ptr<Image>, boost::shared_ptr<Subtitle>, Size, int, Scaler const *, int, float, std::string, int, int, Log *);
virtual ~DCPVideoFrame ();
boost::shared_ptr<EncodedData> encode_locally ();
void write_encoded (boost::shared_ptr<const Options>, uint8_t *, int);
boost::shared_ptr<Image> _input; ///< the input image
+ boost::shared_ptr<Subtitle> _subtitle; ///< any subtitle that should be on the image
Size _out_size; ///< the required size of the output, in pixels
int _padding;
Scaler const * _scaler; ///< scaler to use
* @param frame to decode; caller manages memory.
*/
void
-Decoder::process_video (AVFrame* frame)
+Decoder::process_video (AVFrame* frame, shared_ptr<Subtitle> sub)
{
if (_minimal) {
++_video_frame;
image->make_black ();
}
- overlay (image);
-
TIMING ("Decoder emits %1", _video_frame);
- Video (image, _video_frame);
+ Video (image, _video_frame, sub);
++_video_frame;
}
}
class Image;
class Log;
class DelayLine;
+class Subtitle;
/** @class Decoder.
* @brief Parent class for decoders of content.
/** Emitted when a video frame is ready.
* First parameter is the frame.
* Second parameter is its index within the content.
+ * Third parameter is either 0 or a subtitle that should be on this frame.
*/
- sigc::signal<void, boost::shared_ptr<Image>, int> Video;
+ sigc::signal<void, boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle> > Video;
/** Emitted when some audio data is ready.
* First parameter is the interleaved sample data, format is given in the FilmState.
virtual int time_base_denominator () const = 0;
virtual int sample_aspect_ratio_numerator () const = 0;
virtual int sample_aspect_ratio_denominator () const = 0;
- virtual void overlay (boost::shared_ptr<Image> image) const {}
- void process_video (AVFrame *);
+ void process_video (AVFrame *, boost::shared_ptr<Subtitle>);
void process_audio (uint8_t *, int);
/** our FilmState */
class Options;
class Image;
class Log;
+class Subtitle;
/** @class Encoder
* @brief Parent class for classes which can encode video and audio frames.
/** Called with a frame of video.
* @param i Video frame image.
* @param f Frame number within the film.
+ * @param s A subtitle that should be on this frame, or 0.
*/
- virtual void process_video (boost::shared_ptr<Image> i, int f) = 0;
+ virtual void process_video (boost::shared_ptr<Image> i, int f, boost::shared_ptr<Subtitle> s) = 0;
/** Called with some audio data.
* @param d Data.
#include "util.h"
#include "log.h"
#include "ffmpeg_decoder.h"
+#include "subtitle.h"
using namespace std;
using namespace boost;
, _audio_codec (0)
, _subtitle_codec_context (0)
, _subtitle_codec (0)
- , _have_subtitle (false)
{
setup_general ();
setup_video ();
avcodec_close (_video_codec_context);
}
- if (_have_subtitle) {
- avsubtitle_free (&_subtitle);
- }
-
if (_subtitle_codec_context) {
avcodec_close (_subtitle_codec_context);
}
int frame_finished;
while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- process_video (_frame);
+ shared_ptr<Subtitle> s;
+ if (_subtitle && _subtitle->displayed_at (double (last_video_frame()) / rint (_fs->frames_per_second))) {
+ s = _subtitle;
+ }
+
+ process_video (_frame, s);
}
if (_audio_stream >= 0 && _opt->decode_audio) {
int frame_finished;
if (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- process_video (_frame);
+ shared_ptr<Subtitle> s;
+ if (_subtitle && _subtitle->displayed_at (double (last_video_frame()) / rint (_fs->frames_per_second))) {
+ s = _subtitle;
+ }
+
+ process_video (_frame, s);
}
} else if (_audio_stream >= 0 && _packet.stream_index == _audio_stream && _opt->decode_audio) {
process_audio (_frame->data[0], data_size);
}
- } else if (_subtitle_stream >= 0 && _packet.stream_index == _subtitle_stream && _fs->with_subtitles) {
-
- if (_have_subtitle) {
- avsubtitle_free (&_subtitle);
- _have_subtitle = false;
- }
+ } else if (_subtitle_stream >= 0 && _packet.stream_index == _subtitle_stream) {
int got_subtitle;
- if (avcodec_decode_subtitle2 (_subtitle_codec_context, &_subtitle, &got_subtitle, &_packet) && got_subtitle) {
- _have_subtitle = true;
+ AVSubtitle sub;
+ if (avcodec_decode_subtitle2 (_subtitle_codec_context, &sub, &got_subtitle, &_packet) && got_subtitle) {
+ _subtitle.reset (new Subtitle (sub));
+ avsubtitle_free (&sub);
}
}
return _video_codec_context->sample_aspect_ratio.den;
}
-void
-FFmpegDecoder::overlay (shared_ptr<Image> image) const
-{
- if (!_have_subtitle) {
- return;
- }
-
- /* subtitle PTS in seconds */
- float const packet_time = (_subtitle.pts / AV_TIME_BASE) + float (_subtitle.pts % AV_TIME_BASE) / 1e6;
- /* hence start time for this sub */
- float const from = packet_time + (float (_subtitle.start_display_time) / 1e3);
- float const to = packet_time + (float (_subtitle.end_display_time) / 1e3);
-
- float const video_frame_time = float (last_video_frame ()) / rint (_fs->frames_per_second);
-
- if (from > video_frame_time || video_frame_time < to) {
- return;
- }
-
- for (unsigned int i = 0; i < _subtitle.num_rects; ++i) {
- AVSubtitleRect* rect = _subtitle.rects[i];
- if (rect->type != SUBTITLE_BITMAP) {
- throw DecodeError ("non-bitmap subtitles not yet supported");
- }
-
- /* XXX: all this assumes YUV420 in image */
-
- assert (rect->pict.data[0]);
-
- /* Start of the first line in the target image */
- uint8_t* frame_y_p = image->data()[0] + rect->y * image->line_size()[0];
- uint8_t* frame_u_p = image->data()[1] + (rect->y / 2) * image->line_size()[1];
- uint8_t* frame_v_p = image->data()[2] + (rect->y / 2) * image->line_size()[2];
-
- int const hlim = min (rect->y + rect->h, image->size().height) - rect->y;
-
- /* Start of the first line in the subtitle */
- uint8_t* sub_p = rect->pict.data[0];
- /* sub_p looks up into a RGB palette which is here */
- uint32_t const * palette = (uint32_t *) rect->pict.data[1];
-
- for (int sub_y = 0; sub_y < hlim; ++sub_y) {
- /* Pointers to the start of this line */
- uint8_t* sub_line_p = sub_p;
- uint8_t* frame_line_y_p = frame_y_p + rect->x;
- uint8_t* frame_line_u_p = frame_u_p + (rect->x / 2);
- uint8_t* frame_line_v_p = frame_v_p + (rect->x / 2);
-
- /* U and V are subsampled */
- uint8_t next_u = 0;
- uint8_t next_v = 0;
- int subsample_step = 0;
-
- for (int sub_x = 0; sub_x < rect->w; ++sub_x) {
-
- /* RGB value for this subtitle pixel */
- uint32_t const val = palette[*sub_line_p++];
-
- int const red = (val & 0xff);
- int const green = (val & 0xff00) >> 8;
- int const blue = (val & 0xff0000) >> 16;
- float const alpha = ((val & 0xff000000) >> 24) / 255.0;
-
- /* Alpha-blend Y */
- int const cy = *frame_line_y_p;
- *frame_line_y_p++ = int (cy * (1 - alpha)) + int (RGB_TO_Y_CCIR (red, green, blue) * alpha);
-
- /* Store up U and V */
- next_u |= ((RGB_TO_U_CCIR (red, green, blue, 0) & 0xf0) >> 4) << (4 * subsample_step);
- next_v |= ((RGB_TO_V_CCIR (red, green, blue, 0) & 0xf0) >> 4) << (4 * subsample_step);
-
- if (subsample_step == 1 && (sub_y % 2) == 0) {
- int const cu = *frame_line_u_p;
- int const cv = *frame_line_v_p;
-
- *frame_line_u_p++ =
- int (((cu & 0x0f) * (1 - alpha) + (next_u & 0x0f) * alpha)) |
- int (((cu & 0xf0) * (1 - alpha) + (next_u & 0xf0) * alpha));
-
- *frame_line_v_p++ =
- int (((cv & 0x0f) * (1 - alpha) + (next_v & 0x0f) * alpha)) |
- int (((cv & 0xf0) * (1 - alpha) + (next_v & 0xf0) * alpha));
-
- next_u = next_v = 0;
- }
-
- subsample_step = (subsample_step + 1) % 2;
- }
-
- sub_p += rect->pict.linesize[0];
- frame_y_p += image->line_size()[0];
- if ((sub_y % 2) == 0) {
- frame_u_p += image->line_size()[1];
- frame_v_p += image->line_size()[2];
- }
- }
- }
-}
-
bool
FFmpegDecoder::has_subtitles () const
{
class Options;
class Image;
class Log;
+class Subtitle;
/** @class FFmpegDecoder
* @brief A decoder using FFmpeg to decode content.
int time_base_denominator () const;
int sample_aspect_ratio_numerator () const;
int sample_aspect_ratio_denominator () const;
- void overlay (boost::shared_ptr<Image> image) const;
void setup_general ();
void setup_video ();
AVCodec* _subtitle_codec; ///< may be 0 if there is no subtitle
AVPacket _packet;
- AVSubtitle _subtitle;
- bool _have_subtitle;
+ boost::shared_ptr<Subtitle> _subtitle;
};
}
break;
case PIX_FMT_RGB24:
+ case PIX_FMT_RGBA:
return size().height;
default:
assert (false);
case PIX_FMT_YUV420P:
return 3;
case PIX_FMT_RGB24:
+ case PIX_FMT_RGBA:
return 1;
default:
assert (false);
return 0;
}
+/** Scale this image to a given size, keeping its pixel format the same.
+ *  @param out_size Size to scale to, in pixels.
+ *  @param scaler Scaler to use; must not be 0.
+ *  @return New, scaled image.
+ */
+shared_ptr<Image>
+Image::scale (Size out_size, Scaler const * scaler) const
+{
+	assert (scaler);
+
+	shared_ptr<SimpleImage> scaled (new SimpleImage (pixel_format(), out_size));
+
+	/* Source and destination pixel formats are the same; only the size changes */
+	struct SwsContext* scale_context = sws_getContext (
+		size().width, size().height, pixel_format(),
+		out_size.width, out_size.height, pixel_format(),
+		scaler->ffmpeg_id (), 0, 0, 0
+	);
+
+	sws_scale (
+		scale_context,
+		data(), line_size(),
+		0, size().height,
+		scaled->data (), scaled->line_size ()
+	);
+
+	sws_freeContext (scale_context);
+
+	return scaled;
+}
+
/** Scale this image to a given size and convert it to RGB.
* @param out_size Output image size in pixels.
* @param scaler Scaler to use.
{
_data = (uint8_t **) av_malloc (components() * sizeof (uint8_t *));
_line_size = (int *) av_malloc (components() * sizeof (int));
+
+ switch (p) {
+ case PIX_FMT_RGB24:
+ _line_size[0] = s.width * 3;
+ break;
+ case PIX_FMT_RGBA:
+ _line_size[0] = s.width * 4;
+ break;
+ case PIX_FMT_YUV420P:
+ _line_size[0] = s.width;
+ _line_size[1] = s.width / 2;
+ _line_size[2] = s.width / 2;
+ break;
+ default:
+ assert (false);
+ }
for (int i = 0; i < components(); ++i) {
- _data[i] = 0;
- _line_size[i] = 0;
+ _data[i] = (uint8_t *) av_malloc (_line_size[i] * lines (i));
}
}
av_free (_line_size);
}
-/** Set the size in bytes of each horizontal line of a given component.
- * @param i Component index.
- * @param s Size of line in bytes.
- */
-void
-SimpleImage::set_line_size (int i, int s)
-{
- _line_size[i] = s;
- _data[i] = (uint8_t *) av_malloc (s * lines (i));
-}
-
uint8_t **
SimpleImage::data () const
{
int components () const;
int lines (int) const;
boost::shared_ptr<RGBFrameImage> scale_and_convert_to_rgb (Size, int, Scaler const *) const;
+ boost::shared_ptr<Image> scale (Size, Scaler const *) const;
boost::shared_ptr<PostProcessImage> post_process (std::string) const;
void make_black ();
int * line_size () const;
Size size () const;
- void set_line_size (int, int);
-
private:
Size _size; ///< size in pixels
uint8_t** _data; ///< array of pointers to components
+/*
+ Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
#include <iostream>
#include <Magick++/Image.h>
#include "imagemagick_decoder.h"
#include "image.h"
using namespace std;
+using namespace boost;
ImageMagickDecoder::ImageMagickDecoder (
boost::shared_ptr<const FilmState> s, boost::shared_ptr<const Options> o, Job* j, Log* l, bool minimal, bool ignore_length)
}
- process_video (image.frame ());
+ process_video (image.frame (), shared_ptr<Subtitle> ());
_done = true;
return false;
+/*
+ Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
#include "decoder.h"
namespace Magick {
}
void
-J2KStillEncoder::process_video (shared_ptr<Image> yuv, int frame)
+J2KStillEncoder::process_video (shared_ptr<Image> yuv, int frame, shared_ptr<Subtitle> sub)
{
pair<string, string> const s = Filter::ffmpeg_strings (_fs->filters);
DCPVideoFrame* f = new DCPVideoFrame (
- yuv, _opt->out_size, _opt->padding, _fs->scaler, 0, _fs->frames_per_second, s.second,
+ yuv, sub, _opt->out_size, _opt->padding, _fs->scaler, 0, _fs->frames_per_second, s.second,
Config::instance()->colour_lut_index(), Config::instance()->j2k_bandwidth(),
_log
);
J2KStillEncoder (boost::shared_ptr<const FilmState>, boost::shared_ptr<const Options>, Log *);
void process_begin (int64_t audio_channel_layout, AVSampleFormat audio_sample_format) {}
- void process_video (boost::shared_ptr<Image>, int);
+ void process_video (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>);
void process_audio (uint8_t *, int) {}
void process_end () {}
};
}
void
-J2KWAVEncoder::process_video (shared_ptr<Image> yuv, int frame)
+J2KWAVEncoder::process_video (shared_ptr<Image> yuv, int frame, shared_ptr<Subtitle> sub)
{
boost::mutex::scoped_lock lock (_worker_mutex);
TIMING ("adding to queue of %1", _queue.size ());
_queue.push_back (boost::shared_ptr<DCPVideoFrame> (
new DCPVideoFrame (
- yuv, _opt->out_size, _opt->padding, _fs->scaler, frame, _fs->frames_per_second, s.second,
+ yuv, sub, _opt->out_size, _opt->padding, _fs->scaler, frame, _fs->frames_per_second, s.second,
Config::instance()->colour_lut_index (), Config::instance()->j2k_bandwidth (),
_log
)
class DCPVideoFrame;
class Image;
class Log;
+class Subtitle;
/** @class J2KWAVEncoder
* @brief An encoder which writes JPEG2000 and WAV files.
~J2KWAVEncoder ();
void process_begin (int64_t audio_channel_layout, AVSampleFormat audio_sample_format);
- void process_video (boost::shared_ptr<Image>, int);
+ void process_video (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>);
void process_audio (uint8_t *, int);
void process_end ();
{
boost::mutex::scoped_lock lm (_mutex);
for (list<shared_ptr<Job> >::iterator i = _jobs.begin(); i != _jobs.end(); ++i) {
+ if ((*i)->running ()) {
+ /* Something is already happening */
+ break;
+ }
+
if ((*i)->is_new()) {
shared_ptr<Job> r = (*i)->required ();
if (!r || r->finished_ok ()) {
* @param t true to return a temporary file path, otherwise a permanent one.
* @return The path to write this video frame to.
*/
- std::string frame_out_path (int f, bool t) const {
+ std::string frame_out_path (int f, bool t, std::string e = "") const {
+ if (e.empty ()) {
+ e = _frame_out_extension;
+ }
+
std::stringstream s;
s << _frame_out_path << "/";
s.width (8);
- s << std::setfill('0') << f << _frame_out_extension;
+ s << std::setfill('0') << f << e;
if (t) {
s << ".tmp";
shared_ptr<SimpleImage> image (new SimpleImage (pixel_format, in_size));
- for (int i = 0; i < image->components(); ++i) {
- int line_size;
- s >> line_size;
- image->set_line_size (i, line_size);
- }
-
for (int i = 0; i < image->components(); ++i) {
socket->read_definite_and_consume (image->data()[i], image->line_size()[i] * image->lines(i), 30);
}
+
+ /* XXX: subtitle */
+ DCPVideoFrame dcp_video_frame (
+ image, shared_ptr<Subtitle> (), out_size, padding, scaler, frame, frames_per_second, post_process, colour_lut_index, j2k_bandwidth, _log
+ );
- DCPVideoFrame dcp_video_frame (image, out_size, padding, scaler, frame, frames_per_second, post_process, colour_lut_index, j2k_bandwidth, _log);
shared_ptr<EncodedData> encoded = dcp_video_frame.encode_locally ();
encoded->send (socket);
_TIFFfree (raster);
TIFFClose (t);
- process_video (image.frame ());
+ process_video (image.frame (), shared_ptr<Subtitle> ());
++_iter;
return false;
#include <sstream>
#include <iomanip>
#include <iostream>
+#include <fstream>
#include <boost/filesystem.hpp>
#include <tiffio.h>
#include "tiff_encoder.h"
#include "options.h"
#include "exceptions.h"
#include "image.h"
+#include "subtitle.h"
using namespace std;
using namespace boost;
}
void
-TIFFEncoder::process_video (shared_ptr<Image> image, int frame)
+TIFFEncoder::process_video (shared_ptr<Image> image, int frame, shared_ptr<Subtitle> sub)
{
shared_ptr<Image> scaled = image->scale_and_convert_to_rgb (_opt->out_size, _opt->padding, _fs->scaler);
string tmp_file = _opt->frame_out_path (frame, true);
TIFFClose (output);
-	boost::filesystem::rename (tmp_file, _opt->frame_out_path (frame, false));
+	filesystem::rename (tmp_file, _opt->frame_out_path (frame, false));
+
+	if (sub) {
+		/* Scale factors from the content size to the output size, used to
+		   resize the subtitle images so that they match the scaled video.
+		*/
+		float const x_scale = float (_opt->out_size.width) / _fs->size.width;
+		float const y_scale = float (_opt->out_size.height) / _fs->size.height;
+
+		/* Write metadata to a temporary file and rename it into place once
+		   complete, as is done for the frame TIFFs.
+		*/
+		string tmp_metadata_file = _opt->frame_out_path (frame, true, ".sub");
+		ofstream metadata (tmp_metadata_file.c_str ());
+
+		list<shared_ptr<SubtitleImage> > images = sub->images ();
+		int n = 0;
+		for (list<shared_ptr<SubtitleImage> >::iterator i = images.begin(); i != images.end(); ++i) {
+			/* Each subtitle image gets its own numbered TIFF next to the frame */
+			stringstream ext;
+			ext << ".sub." << n << ".tiff";
+
+			string tmp_sub_file = _opt->frame_out_path (frame, true, ext.str ());
+			output = TIFFOpen (tmp_sub_file.c_str(), "w");
+			if (output == 0) {
+				throw CreateFileError (tmp_sub_file);
+			}
+
+			Size new_size = (*i)->image()->size ();
+			new_size.width *= x_scale;
+			new_size.height *= y_scale;
+			shared_ptr<Image> scaled_sub = (*i)->image()->scale (new_size, _fs->scaler);
+
+			TIFFSetField (output, TIFFTAG_IMAGEWIDTH, scaled_sub->size().width);
+			TIFFSetField (output, TIFFTAG_IMAGELENGTH, scaled_sub->size().height);
+			TIFFSetField (output, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
+			TIFFSetField (output, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
+			TIFFSetField (output, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
+			TIFFSetField (output, TIFFTAG_BITSPERSAMPLE, 8);
+			/* 4 samples per pixel: RGBA */
+			TIFFSetField (output, TIFFTAG_SAMPLESPERPIXEL, 4);
+
+			if (TIFFWriteEncodedStrip (output, 0, scaled_sub->data()[0], scaled_sub->size().width * scaled_sub->size().height * 4) == 0) {
+				throw WriteFileError (tmp_sub_file, 0);
+			}
+
+			TIFFClose (output);
+			filesystem::rename (tmp_sub_file, _opt->frame_out_path (frame, false, ext.str ()));
+
+			metadata << "image " << n << "\n"
+				 << "x " << (*i)->position().x << "\n"
+				 << "y " << (*i)->position().y << "\n";
+
+			++n;
+		}
+
+		/* Close and rename the metadata only after all images are recorded */
+		metadata.close ();
+		filesystem::rename (tmp_metadata_file, _opt->frame_out_path (frame, false, ".sub"));
+	}
+
frame_done (frame);
}
TIFFEncoder (boost::shared_ptr<const FilmState> s, boost::shared_ptr<const Options> o, Log* l);
void process_begin (int64_t audio_channel_layout, AVSampleFormat audio_sample_format) {}
- void process_video (boost::shared_ptr<Image>, int);
+ void process_video (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>);
void process_audio (uint8_t *, int) {}
void process_end () {}
};
screen.cc
server.cc
sound_processor.cc
+ subtitle.cc
thumbs_job.cc
tiff_decoder.cc
tiff_encoder.cc
BOOST_AUTO_TEST_CASE (client_server_test)
{
shared_ptr<SimpleImage> image (new SimpleImage (PIX_FMT_RGB24, Size (1998, 1080)));
- image->set_line_size (0, 1998 * 3);
uint8_t* p = image->data()[0];
shared_ptr<DCPVideoFrame> frame (
new DCPVideoFrame (
image,
+ shared_ptr<Subtitle> (),
Size (1998, 1080),
0,
Scaler::from_id ("bicubic"),