}
}
-/** Called to tell the world that some audio data is ready
- * @param audio Audio data.
- */
-void
-Decoder::process_audio (shared_ptr<AudioBuffers> audio)
-{
- /* Maybe apply gain */
- if (_film->audio_gain() != 0) {
- float const linear_gain = pow (10, _film->audio_gain() / 20);
- for (int i = 0; i < audio->channels(); ++i) {
- for (int j = 0; j < audio->frames(); ++j) {
- audio->data(i)[j] *= linear_gain;
- }
- }
- }
-
- Audio (audio);
-}
-
/** Called by subclasses to tell the world that some video data is ready.
 * @param image Decoded (and already filtered) video image.
 */
void
-Decoder::process_video (AVFrame const * frame)
+Decoder::emit_video (shared_ptr<Image> image)
{
- shared_ptr<FilterGraph> graph;
-
- list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process (Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
- ++i;
- }
-
- if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, _opt->apply_crop, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
- _filter_graphs.push_back (graph);
- _film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
- } else {
- graph = *i;
+ shared_ptr<Subtitle> sub;
+ if (_timed_subtitle && _timed_subtitle->displayed_at (double (video_frame()) / _film->frames_per_second())) {
+ sub = _timed_subtitle->subtitle ();
}
- list<shared_ptr<Image> > images = graph->process (frame);
-
- for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
- shared_ptr<Subtitle> sub;
- if (_timed_subtitle && _timed_subtitle->displayed_at (double (video_frame()) / _film->frames_per_second())) {
- sub = _timed_subtitle->subtitle ();
- }
-
- emit_video (*i, sub);
- }
+ Video (image, sub);
}
void
}
void
-Decoder::process_subtitle (shared_ptr<TimedSubtitle> s)
+Decoder::emit_subtitle (shared_ptr<TimedSubtitle> s)
{
_timed_subtitle = s;
virtual PixelFormat pixel_format () const = 0;
- void process_video (AVFrame const *);
- void process_audio (boost::shared_ptr<AudioBuffers>);
- void process_subtitle (boost::shared_ptr<TimedSubtitle>);
+ void emit_video (boost::shared_ptr<Image>);
+ void emit_subtitle (boost::shared_ptr<TimedSubtitle>);
void repeat_last_video ();
/** our Film */
SourceFrame _video_frame;
- std::list<boost::shared_ptr<FilterGraph> > _filter_graphs;
-
boost::shared_ptr<TimedSubtitle> _timed_subtitle;
boost::shared_ptr<Image> _last_image;
#include "util.h"
#include "log.h"
#include "ffmpeg_decoder.h"
+#include "filter_graph.h"
#include "subtitle.h"
using std::cout;
using std::string;
using std::vector;
using std::stringstream;
+using std::list;
using boost::shared_ptr;
using boost::optional;
int frame_finished;
while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- process_video (_frame);
+ filter_and_emit_video (_frame);
}
if (_audio_stream && _opt->decode_audio && _film->use_content_audio()) {
);
assert (_audio_codec_context->channels == _film->audio_channels());
- process_audio (deinterleave_audio (_frame->data[0], data_size));
+ Audio (deinterleave_audio (_frame->data[0], data_size));
}
}
if (delta > -one_frame) {
/* Process this frame */
- process_video (_frame);
+ filter_and_emit_video (_frame);
} else {
/* Otherwise we are omitting a frame to keep things right */
_film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds));
if (s) {
shared_ptr<AudioBuffers> audio (new AudioBuffers (_audio_stream.get().channels(), s));
audio->make_silent ();
- process_audio (audio);
+ Audio (audio);
}
}
);
assert (_audio_codec_context->channels == _film->audio_channels());
- process_audio (deinterleave_audio (_frame->data[0], data_size));
+ Audio (deinterleave_audio (_frame->data[0], data_size));
}
}
indicate that the previous subtitle should stop.
*/
if (sub.num_rects > 0) {
- process_subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub, _first_video.get())));
+ emit_subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub, _first_video.get())));
} else {
- process_subtitle (shared_ptr<TimedSubtitle> ());
+ emit_subtitle (shared_ptr<TimedSubtitle> ());
}
avsubtitle_free (&sub);
}
Decoder::set_subtitle_stream (s);
setup_subtitle ();
}
+
+/** Pass a decoded frame through our filter chain (crop plus any video
+ *  filters configured on the Film) and emit the resulting image(s).
+ *
+ *  Filter graphs are specific to a frame size / pixel format, so we keep
+ *  a list of those we have already built and re-use one where possible.
+ *
+ *  @param frame Frame from the decoder; caller manages its memory.
+ */
+void
+FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
+{
+ shared_ptr<FilterGraph> graph;
+
+ /* Look for an existing graph that can handle this size / pixel format */
+ list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+ while (i != _filter_graphs.end() && !(*i)->can_process (Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
+ ++i;
+ }
+
+ if (i == _filter_graphs.end ()) {
+ /* None suitable; build a new one and remember it for next time */
+ graph.reset (new FilterGraph (_film, this, _opt->apply_crop, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
+ _filter_graphs.push_back (graph);
+ _film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
+ } else {
+ graph = *i;
+ }
+
+ /* A single input frame may yield zero or more filtered images */
+ list<shared_ptr<Image> > images = graph->process (frame);
+
+ for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
+ emit_video (*i);
+ }
+}
/* Methods to query our input video */
float frames_per_second () const;
Size native_size () const;
+ int time_base_numerator () const;
+ int time_base_denominator () const;
+ int sample_aspect_ratio_numerator () const;
+ int sample_aspect_ratio_denominator () const;
void set_audio_stream (boost::optional<AudioStream>);
void set_subtitle_stream (boost::optional<SubtitleStream>);
bool pass ();
PixelFormat pixel_format () const;
- int time_base_numerator () const;
- int time_base_denominator () const;
- int sample_aspect_ratio_numerator () const;
- int sample_aspect_ratio_denominator () const;
AVSampleFormat audio_sample_format () const;
int bytes_per_audio_sample () const;
+ void filter_and_emit_video (AVFrame *);
+
void setup_general ();
void setup_video ();
void setup_audio ();
boost::optional<double> _first_video;
boost::optional<double> _first_audio;
+
+ std::list<boost::shared_ptr<FilterGraph> > _filter_graphs;
};
#endif
#include <libavformat/avio.h>
}
-#include "film.h"
#include "decoder.h"
#include "filter_graph.h"
#include "ffmpeg_compatibility.h"
#include "filter.h"
#include "exceptions.h"
#include "image.h"
+#include "film.h"
+#include "ffmpeg_decoder.h"
using std::stringstream;
using std::string;
using std::list;
using boost::shared_ptr;
-FilterGraph::FilterGraph (shared_ptr<Film> film, Decoder* decoder, bool crop, Size s, AVPixelFormat p)
+FilterGraph::FilterGraph (shared_ptr<Film> film, FFmpegDecoder* decoder, bool crop, Size s, AVPixelFormat p)
: _buffer_src_context (0)
, _buffer_sink_context (0)
, _size (s)
, _pixel_format (p)
{
- stringstream fs;
- Size size_after_crop;
-
- if (crop) {
- size_after_crop = film->cropped_size (decoder->native_size ());
- fs << crop_string (Position (film->crop().left, film->crop().top), size_after_crop);
- } else {
- size_after_crop = decoder->native_size ();
- fs << crop_string (Position (0, 0), size_after_crop);
- }
-
string filters = Filter::ffmpeg_strings (film->filters()).first;
if (!filters.empty ()) {
filters += ",";
}
- filters += fs.str ();
+ if (crop) {
+ filters += crop_string (Position (film->crop().left, film->crop().top), film->cropped_size (decoder->native_size()));
+ } else {
+ filters += crop_string (Position (0, 0), decoder->native_size());
+ }
avfilter_register_all ();
#include "util.h"
-class Decoder;
class Image;
-class Film;
+class VideoFilter;
+class FFmpegDecoder;
class FilterGraph
{
public:
- FilterGraph (boost::shared_ptr<Film> film, Decoder* decoder, bool crop, Size s, AVPixelFormat p);
+ FilterGraph (boost::shared_ptr<Film> film, FFmpegDecoder* decoder, bool crop, Size s, AVPixelFormat p);
bool can_process (Size s, AVPixelFormat p) const;
std::list<boost::shared_ptr<Image> > process (AVFrame const * frame);
--- /dev/null
+#include <cmath>
+#include "gain.h"
+
+using boost::shared_ptr;
+
+/** An AudioProcessor which applies a fixed gain to the audio data that
+ *  it is given, then emits the result.
+ *
+ *  @param log Log to write to.
+ *  @param gain Gain to apply, in dB.
+ */
+Gain::Gain (Log* log, float gain)
+ : AudioProcessor (log)
+ , _gain (gain)
+{
+
+}
+
+/** Apply our gain to some audio data, modifying it in place, then pass
+ *  it on to our listeners.
+ *
+ *  @param b Audio data; modified by this call.
+ */
+void
+Gain::process_audio (shared_ptr<AudioBuffers> b)
+{
+ if (_gain != 0) {
+ /* Convert our gain in dB to a linear multiplier */
+ float const linear_gain = pow (10, _gain / 20);
+ for (int i = 0; i < b->channels(); ++i) {
+ for (int j = 0; j < b->frames(); ++j) {
+ b->data(i)[j] *= linear_gain;
+ }
+ }
+ }
+
+ Audio (b);
+}
--- /dev/null
+#include "processor.h"
+
+/** @class Gain
+ *  @brief An AudioProcessor which applies a fixed dB gain to audio data
+ *  passed to process_audio() and then emits it.
+ *
+ *  NOTE(review): no include guard is visible for this new header — confirm
+ *  one exists (or add one) to match the project's other headers.
+ */
+class Gain : public AudioProcessor
+{
+public:
+ Gain (Log* log, float gain);
+
+ /* Applies _gain (in dB) to the buffers in place, then emits them */
+ void process_audio (boost::shared_ptr<AudioBuffers>);
+
+private:
+ /* Gain to apply, in dB */
+ float _gain;
+};
 * @param p Pixel format.
 * @param s Size in pixels.
 * @param stride_computer Function which returns the stride to use for a
 * component, given the component index and the array of line sizes.
 */
-SimpleImage::SimpleImage (AVPixelFormat p, Size s, function<int (int)> rounder)
+SimpleImage::SimpleImage (AVPixelFormat p, Size s, function<int (int, int const *)> stride_computer)
: Image (p)
, _size (s)
{
}
for (int i = 0; i < components(); ++i) {
- _stride[i] = rounder (_line_size[i]);
+ _stride[i] = stride_computer (i, _line_size);
_data[i] = (uint8_t *) av_malloc (_stride[i] * lines (i));
}
}
}
AlignedImage::AlignedImage (AVPixelFormat f, Size s)
- : SimpleImage (f, s, boost::bind (round_up, _1, 32))
+ : SimpleImage (f, s, boost::bind (stride_round_up, _1, _2, 32))
{
}
-CompactImage::CompactImage (AVPixelFormat f, Size s)
- : SimpleImage (f, s, boost::bind (round_up, _1, 1))
-{
- setup_picture ();
-}
-
-CompactImage::CompactImage (shared_ptr<Image> im)
- : SimpleImage (im->pixel_format(), im->size(), boost::bind (round_up, _1, 1))
+AlignedImage::AlignedImage (shared_ptr<Image> im)
+ : SimpleImage (im->pixel_format(), im->size(), boost::bind (stride_round_up, _1, _2, 1))
{
assert (components() == im->components());
o += im->stride()[c];
}
}
+}
+
+CompactImage::CompactImage (AVPixelFormat f, Size s)
+ : SimpleImage (f, s, boost::bind (stride_round_up, _1, _2, 1))
+{
- setup_picture ();
}
-void
-CompactImage::setup_picture ()
+CompactImage::CompactImage (shared_ptr<Image> im)
+ : SimpleImage (im->pixel_format(), im->size(), boost::bind (stride_round_up, _1, _2, 1))
{
+ assert (components() == im->components());
+
for (int c = 0; c < components(); ++c) {
- _picture.data[c] = data()[c];
- _picture.linesize[c] = line_size()[c];
+
+ assert (line_size()[c] == im->line_size()[c]);
+
+ uint8_t* t = data()[c];
+ uint8_t* o = im->data()[c];
+
+ for (int y = 0; y < lines(c); ++y) {
+ memcpy (t, o, line_size()[c]);
+ t += stride()[c];
+ o += im->stride()[c];
+ }
}
}
class SimpleImage : public Image
{
public:
- SimpleImage (AVPixelFormat, Size, boost::function<int (int)> rounder);
+ SimpleImage (AVPixelFormat, Size, boost::function<int (int, int const *)> rounder);
~SimpleImage ();
uint8_t ** data () const;
{
public:
AlignedImage (AVPixelFormat, Size);
+ AlignedImage (boost::shared_ptr<Image>);
};
class CompactImage : public SimpleImage
public:
CompactImage (AVPixelFormat, Size);
CompactImage (boost::shared_ptr<Image>);
-
- AVPicture const * picture () const {
- return &_picture;
- }
-
-private:
- void setup_picture ();
-
- AVPicture _picture;
};
#endif
#include "image.h"
#include "film.h"
-using namespace std;
-using namespace boost;
+using std::cout;
+using boost::shared_ptr;
ImageMagickDecoder::ImageMagickDecoder (
boost::shared_ptr<Film> f, boost::shared_ptr<const Options> o, Job* j)
}
Size size = native_size ();
- CompactImage image (PIX_FMT_RGB24, size);
+ shared_ptr<CompactImage> image (new CompactImage (PIX_FMT_RGB24, size));
- uint8_t* p = image.data()[0];
+ uint8_t* p = image->data()[0];
for (int y = 0; y < size.height; ++y) {
for (int x = 0; x < size.width; ++x) {
Magick::Color c = _magick_image->pixelColor (x, y);
}
- process_video ((AVFrame const *) image.picture());
+ emit_video (image);
_done = true;
return false;
{}
};
+class VideoProcessor : public Processor, public VideoSource, public VideoSink
+{
+public:
+ VideoProcessor (Log* log)
+ : Processor (log)
+ {}
+};
+
#endif
#include "options.h"
#include "film.h"
-using namespace std;
-using namespace boost;
+using std::cout;
+using std::string;
+using std::stringstream;
+using boost::shared_ptr;
/** @param f Our Film.
* @param o Options.
{
string const dir = _film->content_path ();
- if (!filesystem::is_directory (dir)) {
+ if (!boost::filesystem::is_directory (dir)) {
throw DecodeError ("TIFF content must be in a directory");
}
- for (filesystem::directory_iterator i = filesystem::directory_iterator (dir); i != filesystem::directory_iterator(); ++i) {
+ for (boost::filesystem::directory_iterator i = boost::filesystem::directory_iterator (dir); i != boost::filesystem::directory_iterator(); ++i) {
/* Aah, the sweet smell of progress */
#if BOOST_FILESYSTEM_VERSION == 3
- string const ext = filesystem::path(*i).extension().string();
- string const l = filesystem::path(*i).leaf().generic_string();
+ string const ext = boost::filesystem::path(*i).extension().string();
+ string const l = boost::filesystem::path(*i).leaf().generic_string();
#else
- string const ext = filesystem::path(*i).extension();
+ string const ext = boost::filesystem::path(*i).extension();
string const l = i->leaf ();
#endif
if (ext == ".tif" || ext == ".tiff") {
_files.sort ();
_iter = _files.begin ();
-
}
float
throw DecodeError ("could not read TIFF data");
}
- CompactImage image (PIX_FMT_RGB24, Size (width, height));
+ shared_ptr<CompactImage> image (new CompactImage (PIX_FMT_RGB24, Size (width, height)));
- uint8_t* p = image.data()[0];
+ uint8_t* p = image->data()[0];
for (uint32_t y = 0; y < height; ++y) {
for (uint32_t x = 0; x < width; ++x) {
uint32_t const i = (height - y - 1) * width + x;
_TIFFfree (raster);
TIFFClose (t);
- process_video ((AVFrame const *) image.picture ());
+ emit_video (image);
++_iter;
return false;
#include "film.h"
#include "matcher.h"
#include "delay_line.h"
+#include "options.h"
+#include "gain.h"
using std::string;
using boost::shared_ptr;
{
assert (_encoder);
- AudioStream st = f->audio_stream().get();
-
- _matcher.reset (new Matcher (f->log(), st.sample_rate(), f->frames_per_second()));
- _delay_line.reset (new DelayLine (f->log(), st.channels(), f->audio_delay() * st.sample_rate() / 1000));
+ if (f->audio_stream()) {
+ AudioStream st = f->audio_stream().get();
+ _matcher.reset (new Matcher (f->log(), st.sample_rate(), f->frames_per_second()));
+ _delay_line.reset (new DelayLine (f->log(), st.channels(), f->audio_delay() * st.sample_rate() / 1000));
+ _gain.reset (new Gain (f->log(), f->audio_gain()));
+ }
/* Set up the decoder to use the film's set streams */
_decoder->set_audio_stream (f->audio_stream ());
_decoder->set_subtitle_stream (f->subtitle_stream ());
- _decoder->connect_video (_matcher);
- _matcher->connect_video (_encoder);
-
- _decoder->connect_audio (_delay_line);
- _delay_line->connect_audio (_matcher);
- _matcher->connect_audio (_delay_line);
+ if (_matcher) {
+ _decoder->connect_video (_matcher);
+ _matcher->connect_video (_encoder);
+ } else {
+ _decoder->connect_video (_encoder);
+ }
+
+ if (_matcher && _delay_line) {
+ _decoder->connect_audio (_delay_line);
+ _delay_line->connect_audio (_matcher);
+ _matcher->connect_audio (_gain);
+ _gain->connect_audio (_encoder);
+ }
}
/** Run the decoder, passing its output to the encoder, until the decoder
class Encoder;
class FilmState;
class Matcher;
+class VideoFilter;
+class Gain;
/** @class Transcoder
* @brief A class which takes a FilmState and some Options, then uses those to transcode a Film.
boost::shared_ptr<Decoder> _decoder;
boost::shared_ptr<Matcher> _matcher;
boost::shared_ptr<DelayLine> _delay_line;
+ boost::shared_ptr<Gain> _gain;
};
*/
int
-round_up (int a, int t)
+stride_round_up (int c, int const * stride, int t)
{
- a += (t - 1);
+ int const a = stride[c] + (t - 1);
return a - (a % t);
}
+int
+stride_lookup (int c, int const * stride)
+{
+ return stride[c];
+}
+
/** Read a sequence of key / value pairs from a text stream;
* the keys are the first words on the line, and the values are
* the remainder of the line following the key. Lines beginning
extern int dcp_audio_sample_rate (int);
extern DCPFrameRate dcp_frame_rate (float);
extern std::string colour_lut_index_to_name (int index);
-extern int round_up (int, int);
+extern int stride_round_up (int, int const *, int);
+extern int stride_lookup (int c, int const * stride);
extern std::multimap<std::string, std::string> read_key_value (std::istream& s);
extern int get_required_int (std::multimap<std::string, std::string> const & kv, std::string k);
extern float get_required_float (std::multimap<std::string, std::string> const & kv, std::string k);
film.cc
filter.cc
format.cc
+ gain.cc
image.cc
imagemagick_decoder.cc
imagemagick_encoder.cc
dcp_trim_start 42
dcp_trim_end 99
dcp_ab 1
-selected_audio_stream -1
+use_content_audio 1
audio_gain 0
audio_delay 0
still_duration 10
-selected_subtitle_stream -1
with_subtitles 0
subtitle_offset 0
subtitle_scale 1
width 0
height 0
length 0
-audio_sample_rate 0
content_digest
-has_subtitles 0
frames_per_second 0
BOOST_AUTO_TEST_CASE (stream_test)
{
- AudioStream a ("4 9 hello there world");
+ AudioStream a ("4 44100 1 hello there world");
BOOST_CHECK_EQUAL (a.id(), 4);
- BOOST_CHECK_EQUAL (a.channels(), 9);
+ BOOST_CHECK_EQUAL (a.sample_rate(), 44100);
+ BOOST_CHECK_EQUAL (a.channel_layout(), 1);
BOOST_CHECK_EQUAL (a.name(), "hello there world");
- BOOST_CHECK_EQUAL (a.to_string(), "4 9 hello there world");
+ BOOST_CHECK_EQUAL (a.to_string(), "4 44100 1 hello there world");
SubtitleStream s ("5 a b c");
BOOST_CHECK_EQUAL (s.id(), 5);