, _player_b (film_b->player ())
, _job (j)
, _encoder (new Encoder (film_a, j))
- , _combiner (new Combiner (film_a->log()))
+ , _combiner (new Combiner)
{
_player_a->Video.connect (bind (&Combiner::process_video, _combiner, _1, _2, _3, _4));
_player_b->Video.connect (bind (&Combiner::process_video_b, _combiner, _1, _2, _3, _4));
AnalyseAudioJob::AnalyseAudioJob (shared_ptr<Film> f)
: Job (f)
- , _done (0)
+ , _next (0)
, _samples_per_point (1)
{
shared_ptr<Player> player = _film->player ();
player->disable_video ();
- player->Audio.connect (bind (&AnalyseAudioJob::audio, this, _1));
+ player->Audio.connect (bind (&AnalyseAudioJob::audio, this, _1, _2));
_samples_per_point = max (int64_t (1), _film->time_to_audio_frames (_film->length()) / _num_points);
_current.resize (MAX_AUDIO_CHANNELS);
_analysis.reset (new AudioAnalysis (MAX_AUDIO_CHANNELS));
-
- while (!player->pass()) {
- set_progress (float (_done) / _film->time_to_audio_frames (_film->length ()));
+
+ _next = 0;
+ while (_next < _film->length()) {
+ /* pass() drives the decode and hence advances _next via our
+    audio() callback; without it this loop never terminates. */
+ player->pass ();
+ set_progress (double (_next) / _film->length ());
}
_analysis->write (_film->audio_analysis_path ());
}
void
-AnalyseAudioJob::audio (shared_ptr<const AudioBuffers> b)
+AnalyseAudioJob::audio (shared_ptr<const AudioBuffers> b, Time t)
{
for (int i = 0; i < b->frames(); ++i) {
for (int j = 0; j < b->channels(); ++j) {
_current[j] = AudioPoint ();
}
}
-
- ++_done;
}
+
+ _next = (t + _film->audio_frames_to_time (b->frames()));
}
#include "job.h"
#include "audio_analysis.h"
+#include "types.h"
class AudioBuffers;
private:
- void audio (boost::shared_ptr<const AudioBuffers>);
- int64_t _done;
+ /* audio() now receives the timestamp of each block, matching the
+    two-placeholder bind (_1, _2) in the constructor and the new
+    two-argument definition in the .cc. */
+ void audio (boost::shared_ptr<const AudioBuffers>, Time);
+ Time _next;
int64_t _samples_per_point;
std::vector<AudioPoint> _current;
{
if (_audio_content->content_audio_frame_rate() != _output_audio_frame_rate) {
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+
stringstream s;
s << String::compose ("Will resample audio from %1 to %2", _audio_content->content_audio_frame_rate(), _output_audio_frame_rate);
- _film->log()->log (s.str ());
+ film->log()->log (s.str ());
/* We will be using planar float data when we call the
resampler. As far as I can see, the audio channel
{
if (_swr_context) {
- shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_mapping().dcp_channels(), 256));
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+
+ shared_ptr<AudioBuffers> out (new AudioBuffers (film->audio_mapping().dcp_channels(), 256));
while (1) {
int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
Audio (data, time);
- _next_audio = time + _film->audio_frames_to_time (data->frames());
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ _next_audio = time + film->audio_frames_to_time (data->frames());
}
using boost::shared_ptr;
-Combiner::Combiner (shared_ptr<Log> log)
- : VideoProcessor (log)
+Combiner::Combiner ()
{
}
* @brief Class for combining two video streams.
*/
-#include "processor.h"
+#include "video_source.h"
+#include "video_sink.h"
/** @class Combiner
* @brief A class which can combine two video streams into one, with
* one image used for the left half of the screen and the other for
* the right.
*/
-class Combiner : public VideoProcessor
+class Combiner : public VideoSource, public VideoSink
{
public:
- Combiner (boost::shared_ptr<Log> log);
+ Combiner ();
void process_video (boost::shared_ptr<const Image> i, bool, boost::shared_ptr<Subtitle> s, Time);
void process_video_b (boost::shared_ptr<const Image> i, bool, boost::shared_ptr<Subtitle> s, Time);
* expressed relative to our source's start.
* @param t Time.
*/
- virtual void seek (Time t) {}
+ virtual void seek (Time) {}
/** Seek back one video frame */
virtual void seek_back () {}
}
-bool
+void
FFmpegDecoder::pass ()
{
int r = av_read_frame (_format_context, &_packet);
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
av_strerror (r, buf, sizeof(buf));
- _film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
}
/* Get any remaining frames */
decode_audio_packet ();
}
- return true;
+ return;
}
avcodec_get_frame_defaults (_frame);
if (sub.num_rects > 0) {
shared_ptr<TimedSubtitle> ts;
try {
- emit_subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub)));
+ subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub)));
} catch (...) {
/* some problem with the subtitle; we probably didn't understand it */
}
} else {
- emit_subtitle (shared_ptr<TimedSubtitle> ());
+ subtitle (shared_ptr<TimedSubtitle> ());
}
avsubtitle_free (&sub);
}
}
av_free_packet (&_packet);
- return false;
}
/** @param data pointer to array of pointers to buffers.
}
libdcp::Size
-FFmpegDecoder::native_size () const
+FFmpegDecoder::video_size () const
{
return libdcp::Size (_video_codec_context->width, _video_codec_context->height);
}
-PixelFormat
-FFmpegDecoder::pixel_format () const
-{
- return _video_codec_context->pix_fmt;
-}
-
string
FFmpegDecoder::stream_name (AVStream* s) const
{
return av_get_bytes_per_sample (audio_sample_format ());
}
-bool
+void
FFmpegDecoder::seek (Time t)
{
- return do_seek (t, false, false);
+ do_seek (t, false, false);
}
-bool
+void
FFmpegDecoder::seek_back ()
{
if (next() < 2.5) {
- return true;
+ return;
}
- return do_seek (next() - 2.5 * TIME_HZ / video_frame_rate(), true, true);
+ do_seek (next() - 2.5 * TIME_HZ / video_frame_rate(), true, true);
}
-bool
+void
FFmpegDecoder::seek_forward ()
{
if (next() >= (video_length() - video_frame_rate())) {
- return true;
+ return;
}
- return do_seek (next() - 0.5 * TIME_HZ / video_frame_rate(), true, true);
+ do_seek (next() - 0.5 * TIME_HZ / video_frame_rate(), true, true);
}
-bool
+void
FFmpegDecoder::do_seek (Time t, bool backwards, bool accurate)
{
int64_t const vt = t / (av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ);
-
- int const r = av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
+ av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
avcodec_flush_buffers (_video_codec_context);
if (_subtitle_codec_context) {
while (1) {
int r = av_read_frame (_format_context, &_packet);
if (r < 0) {
- return true;
+ return;
}
avcodec_get_frame_defaults (_frame);
}
}
- return r < 0;
+ return;
}
void
if (i == _filter_graphs.end ()) {
graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
_filter_graphs.push_back (graph);
- _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
} else {
graph = *i;
}
(as per old Matcher)
*/
Time const t = bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ;
- emit_video (*i, false, t);
+ video (*i, false, t);
} else {
- _film->log()->log ("Dropping frame without PTS");
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log ("Dropping frame without PTS");
}
}
FFmpegDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const FFmpegContent>, bool video, bool audio, bool subtitles);
~FFmpegDecoder ();
- bool pass ();
- bool seek (Time);
- bool seek_back ();
- bool seek_forward ();
+ /* Decoder */
+
+ void pass ();
+ void seek (Time);
+ void seek_back ();
+ void seek_forward ();
Time next () const;
+ /* VideoDecoder */
+
float video_frame_rate () const;
- libdcp::Size native_size () const;
+ libdcp::Size video_size () const;
ContentVideoFrame video_length () const;
+ /* FFmpegDecoder */
+
std::vector<boost::shared_ptr<FFmpegSubtitleStream> > subtitle_streams () const {
return _subtitle_streams;
}
- PixelFormat pixel_format () const;
AVSampleFormat audio_sample_format () const;
int bytes_per_audio_sample () const;
- bool do_seek (Time, bool, bool);
+ void do_seek (Time, bool, bool);
void setup_general ();
void setup_video ();
using std::string;
using std::list;
using boost::shared_ptr;
+using boost::weak_ptr;
using libdcp::Size;
/** Construct a FilterGraph for the settings in a film.
* @param s Size of the images to process.
* @param p Pixel format of the images to process.
*/
-FilterGraph::FilterGraph (shared_ptr<const Film> film, FFmpegDecoder* decoder, libdcp::Size s, AVPixelFormat p)
+FilterGraph::FilterGraph (weak_ptr<const Film> weak_film, FFmpegDecoder* decoder, libdcp::Size s, AVPixelFormat p)
: _buffer_src_context (0)
, _buffer_sink_context (0)
, _size (s)
, _pixel_format (p)
{
+ shared_ptr<const Film> film = weak_film.lock ();
+ assert (film);
+
string filters = Filter::ffmpeg_strings (film->filters()).first;
if (!filters.empty ()) {
filters += N_(",");
}
Crop crop = decoder->ffmpeg_content()->crop ();
- libdcp::Size cropped_size = decoder->native_size ();
+ libdcp::Size cropped_size = decoder->video_size ();
cropped_size.width -= crop.left + crop.right;
cropped_size.height -= crop.top + crop.bottom;
filters += crop_string (Position (crop.left, crop.top), cropped_size);
class FilterGraph
{
public:
- FilterGraph (boost::shared_ptr<const Film>, FFmpegDecoder* decoder, libdcp::Size s, AVPixelFormat p);
+ FilterGraph (boost::weak_ptr<const Film>, FFmpegDecoder* decoder, libdcp::Size s, AVPixelFormat p);
bool can_process (libdcp::Size s, AVPixelFormat p) const;
std::list<boost::shared_ptr<Image> > process (AVFrame const * frame);
: Decoder (f)
, VideoDecoder (f, c)
, _imagemagick_content (c)
- , _position (0)
{
}
libdcp::Size
-ImageMagickDecoder::native_size () const
+ImageMagickDecoder::video_size () const
{
- if (!_native_size) {
+ if (!_video_size) {
using namespace MagickCore;
Magick::Image* image = new Magick::Image (_imagemagick_content->file().string());
- _native_size = libdcp::Size (image->columns(), image->rows());
+ _video_size = libdcp::Size (image->columns(), image->rows());
delete image;
}
- return _native_size.get ();
+ return _video_size.get ();
}
int
return _imagemagick_content->video_length ();
}
-bool
+float
+ImageMagickDecoder::video_frame_rate () const
+{
+ boost::shared_ptr<const Film> f = _film.lock ();
+ if (!f) {
+ return 24;
+ }
+
+ return f->dcp_video_frame_rate ();
+}
+
+void
ImageMagickDecoder::pass ()
{
if (_position < 0 || _position >= _imagemagick_content->video_length ()) {
- return true;
+ return;
}
if (_image) {
- emit_video (_image, true, double (_position) / 24);
+ video (_image, true, double (_position) / video_frame_rate());
_position++;
- return false;
+ return;
}
Magick::Image* magick_image = new Magick::Image (_imagemagick_content->file().string ());
- _native_size = libdcp::Size (magick_image->columns(), magick_image->rows());
+ _video_size = libdcp::Size (magick_image->columns(), magick_image->rows());
- _image.reset (new SimpleImage (PIX_FMT_RGB24, _native_size.get(), false));
+ _image.reset (new SimpleImage (PIX_FMT_RGB24, _video_size.get(), false));
using namespace MagickCore;
uint8_t* p = _image->data()[0];
- for (int y = 0; y < _native_size->height; ++y) {
- for (int x = 0; x < _native_size->width; ++x) {
+ for (int y = 0; y < _video_size->height; ++y) {
+ for (int x = 0; x < _video_size->width; ++x) {
Magick::Color c = magick_image->pixelColor (x, y);
*p++ = c.redQuantum() * 255 / QuantumRange;
*p++ = c.greenQuantum() * 255 / QuantumRange;
delete magick_image;
_image = _image->crop (_imagemagick_content->crop(), true);
- emit_video (_image, false, double (_position) / 24);
+ /* Use video_frame_rate() rather than a hard-coded 24, matching the
+    cached-image emit path above. */
+ video (_image, false, double (_position) / video_frame_rate());
++_position;
- return false;
public:
ImageMagickDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const ImageMagickContent>);
- bool pass ();
- bool seek (double);
+ /* Decoder */
+
+ void pass ();
+ void seek (double);
Time next () const;
- float video_frame_rate () const {
- return 24;
- }
+ /* VideoDecoder */
- libdcp::Size native_size () const;
+ float video_frame_rate () const;
+ libdcp::Size video_size () const;
ContentVideoFrame video_length () const;
+ /* ImageMagickDecoder */
+
boost::shared_ptr<const ImageMagickContent> content () const {
return _imagemagick_content;
}
-protected:
- PixelFormat pixel_format () const;
-
private:
boost::shared_ptr<const ImageMagickContent> _imagemagick_content;
boost::shared_ptr<Image> _image;
- ContentVideoFrame _position;
- mutable boost::optional<libdcp::Size> _native_size;
+ mutable boost::optional<libdcp::Size> _video_size;
};
, _video (true)
, _audio (true)
, _subtitles (true)
- , _have_valid_decoders (false)
+ , _have_valid_pieces (false)
, _position (0)
, _audio_buffers (MAX_AUDIO_CHANNELS, 0)
, _last_video (0)
void
Player::process_video (shared_ptr<Piece> piece, shared_ptr<const Image> image, bool same, shared_ptr<Subtitle> sub, Time time)
{
- time += piece->start ();
+ time += piece->content->start ();
Video (image, same, sub, time);
}
be added to any more, so it can be emitted.
*/
- time += piece->start ();
+ time += piece->content->start ();
if (time > _next_audio) {
/* We can emit some audio from our buffers */
}
-/** @return true on error */
-bool
+void
Player::seek (Time t)
{
if (!_have_valid_pieces) {
}
if (_pieces.empty ()) {
- return true;
+ return;
}
/* XXX: don't seek audio because we don't need to... */
-
- return false;
}
{
bool operator() (shared_ptr<Content> a, shared_ptr<Content> b)
{
- return a->time() < b->time();
+ return a->start() < b->start();
}
};
void disable_audio ();
void disable_subtitles ();
- bool pass ();
- bool seek (Time);
+ void pass ();
+ void seek (Time);
void seek_back ();
void seek_forward ();
+++ /dev/null
-/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-/** @file src/processor.h
- * @brief Parent class for classes which accept and then emit video or audio data.
- */
-
-#ifndef DCPOMATIC_PROCESSOR_H
-#define DCPOMATIC_PROCESSOR_H
-
-#include "video_source.h"
-#include "video_sink.h"
-#include "audio_source.h"
-#include "audio_sink.h"
-
-class Log;
-
-/** @class Processor
- * @brief Base class for processors.
- */
-class Processor
-{
-public:
- /** Construct a Processor.
- * @param log Log to use.
- */
- Processor (boost::shared_ptr<Log> log)
- : _log (log)
- {}
-
- virtual ~Processor() {}
-
- /** Will be called at the end of a processing run */
- virtual void process_end () {}
-
-protected:
- boost::shared_ptr<Log> _log; ///< log to write to
-};
-
-/** @class AudioVideoProcessor
- * @brief A processor which handles both video and audio data.
- */
-class AudioVideoProcessor : public Processor, public VideoSource, public VideoSink, public AudioSource, public AudioSink
-{
-public:
- /** Construct an AudioVideoProcessor.
- * @param log Log to write to.
- */
- AudioVideoProcessor (boost::shared_ptr<Log> log)
- : Processor (log)
- {}
-};
-
-/** @class AudioProcessor
- * @brief A processor which handles just audio data.
- */
-class AudioProcessor : public Processor, public AudioSource, public AudioSink
-{
-public:
- /** Construct an AudioProcessor.
- * @param log Log to write to.
- */
- AudioProcessor (boost::shared_ptr<Log> log)
- : Processor (log)
- {}
-};
-
-/** @class VideoProcessor
- * @brief A processor which handles just video data.
- */
-class VideoProcessor : public Processor, public VideoSource, public VideoSink
-{
-public:
- /** Construct an VideoProcessor.
- * @param log Log to write to.
- */
- VideoProcessor (boost::shared_ptr<Log> log)
- : Processor (log)
- {}
-};
-
-#endif
delete[] _deinterleave_buffer;
}
-bool
+void
SndfileDecoder::pass ()
{
/* Do things in half second blocks as I think there may be limits
Audio (audio, double(_done) / audio_frame_rate());
_done += this_time;
_remaining -= this_time;
-
- return (_remaining == 0);
}
int
SndfileDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const SndfileContent>);
~SndfileDecoder ();
- bool pass ();
+ void pass ();
Time next ();
int audio_channels () const;
#include "subtitle.h"
#include "film.h"
#include "image.h"
-#include "log.h"
-#include "job.h"
#include "i18n.h"
using std::cout;
using boost::shared_ptr;
-using boost::optional;
VideoDecoder::VideoDecoder (shared_ptr<const Film> f, shared_ptr<const VideoContent> c)
: Decoder (f)
, _next_video (0)
, _video_content (c)
+ , _frame_rate_conversion (c->video_frame_rate(), f->dcp_frame_rate())
+ , _odd (false)
{
}
-/** Called by subclasses to tell the world that some video data is ready.
- * We find a subtitle then emit it for listeners.
+/** Called by subclasses when some video is ready.
* @param image frame to emit.
+ * @param same true if this frame is the same as the last one emitted.
* @param t Time of the frame within the source.
*/
void
-VideoDecoder::emit_video (shared_ptr<Image> image, bool same, Time t)
+VideoDecoder::video (shared_ptr<Image> image, bool same, Time t)
{
+ if (_frame_rate_conversion.skip && _odd) {
+ _odd = !_odd;
+ return;
+ }
+
shared_ptr<Subtitle> sub;
if (_timed_subtitle && _timed_subtitle->displayed_at (t)) {
sub = _timed_subtitle->subtitle ();
}
- TIMING (N_("Decoder emits %1"), _video_frame);
Video (image, same, sub, t);
- ++_video_frame;
- /* XXX: who's doing skip / repeat? */
- _next_video = t + _film->video_frames_to_time (1);
+ if (_frame_rate_conversion.repeat) {
+ Video (image, true, sub, t + _film->video_frames_to_time (1));
+ _next_video = t + _film->video_frames_to_time (2);
+ } else {
+ _next_video = t + _film->video_frames_to_time (1);
+ }
+
+ _odd = !_odd;
}
-/** Set up the current subtitle. This will be put onto frames that
- * fit within its time specification. s may be 0 to say that there
- * is no current subtitle.
+/** Called by subclasses when a subtitle is ready.
+ * s may be 0 to say that there is no current subtitle.
* @param s New current subtitle, or 0.
*/
void
-VideoDecoder::emit_subtitle (shared_ptr<TimedSubtitle> s)
+VideoDecoder::subtitle (shared_ptr<TimedSubtitle> s)
{
_timed_subtitle = s;
#include "video_source.h"
#include "decoder.h"
+#include "util.h"
class VideoContent;
protected:
- void emit_video (boost::shared_ptr<Image>, bool, Time);
- void emit_subtitle (boost::shared_ptr<TimedSubtitle>);
+ void video (boost::shared_ptr<Image>, bool, Time);
+ void subtitle (boost::shared_ptr<TimedSubtitle>);
Time _next_video;
private:
boost::shared_ptr<const VideoContent> _video_content;
boost::shared_ptr<TimedSubtitle> _timed_subtitle;
+ FrameRateConversion _frame_rate_conversion;
+ bool _odd;
};
#endif
class Image;
/** @class VideoSource
- * @param A class that emits video data without timestamps.
+ * @brief A class that emits video data.
*/
class VideoSource
{