_samples_per_point = max (int64_t (1), _film->time_to_audio_frames (_film->length()) / _num_points);
- _current.resize (MAX_AUDIO_CHANNELS);
- _analysis.reset (new AudioAnalysis (MAX_AUDIO_CHANNELS));
+ _current.resize (_film->dcp_audio_channels ());
+ _analysis.reset (new AudioAnalysis (_film->dcp_audio_channels ()));
_done = 0;
while (player->pass ()) {
/** Add data from from `from', `from_channel' to our channel `to_channel' */
void
-AudioBuffers::accumulate (AudioBuffers const * from, int from_channel, int to_channel)
+AudioBuffers::accumulate_channel (AudioBuffers const * from, int from_channel, int to_channel)
{
int const N = frames ();
assert (from->frames() == N);
}
for (int i = 0; i < _channels; ++i) {
- _data[i] = static_cast<float*> (realloc (_data[i], _frames * sizeof (float)));
+ _data[i] = static_cast<float*> (realloc (_data[i], frames * sizeof (float)));
if (!_data[i]) {
throw bad_alloc ();
}
}
+
+ _allocated_frames = frames;
+}
+
+/** Mix (accumulate) frames from another AudioBuffers into this one.
+ *  @param from Buffers to read from; must have the same channel count as this.
+ *  @param read_offset Frame offset to start reading from in `from'.
+ *  @param write_offset Frame offset to start mixing into in this buffer.
+ *  @param frames Number of frames to mix.
+ */
+void
+AudioBuffers::accumulate_frames (AudioBuffers const * from, int read_offset, int write_offset, int frames)
+{
+ assert (_channels == from->channels ());
+ /* Bounds checks: do not read past the source nor write past our allocation */
+ assert (read_offset >= 0 && write_offset >= 0 && frames >= 0);
+ assert (read_offset + frames <= from->frames ());
+ assert (write_offset + frames <= _allocated_frames);
+
+ for (int i = 0; i < _channels; ++i) {
+ for (int j = 0; j < frames; ++j) {
+ _data[i][j + write_offset] += from->data()[i][j + read_offset];
+ }
+ }
}
+
void copy_from (AudioBuffers const * from, int frames_to_copy, int read_offset, int write_offset);
void move (int from, int to, int frames);
- void accumulate (AudioBuffers const *, int, int);
+ void accumulate_channel (AudioBuffers const *, int, int);
+ void accumulate_frames (AudioBuffers const *, int read_offset, int write_offset, int frames);
private:
/** Number of channels */
void
AudioDecoder::audio (shared_ptr<const AudioBuffers> data, Time time)
{
- /* XXX: map audio to 5.1 */
-
/* Maybe resample */
if (_swr_context) {
(int64_t) data->frames() * _audio_content->output_audio_frame_rate() / _audio_content->content_audio_frame_rate()
) + 32;
- shared_ptr<AudioBuffers> resampled (new AudioBuffers (MAX_AUDIO_CHANNELS, max_resampled_frames));
+ shared_ptr<AudioBuffers> resampled (new AudioBuffers (data->channels(), max_resampled_frames));
/* Resample audio */
int const resampled_frames = swr_convert (
data = resampled;
}
- Audio (data, time);
-
shared_ptr<const Film> film = _film.lock ();
assert (film);
+
+ /* Remap channels */
+ shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (film->dcp_audio_channels(), data->frames()));
+ dcp_mapped->make_silent ();
+ list<pair<int, libdcp::Channel> > map = _audio_content->audio_mapping().content_to_dcp ();
+ for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
+ dcp_mapped->accumulate_channel (data.get(), i->first, i->second);
+ }
+
+ Audio (dcp_mapped, time);
_next_audio = time + film->audio_frames_to_time (data->frames());
}
, _j2k_bandwidth (200000000)
, _dci_metadata (Config::instance()->default_dci_metadata ())
, _dcp_video_frame_rate (0)
+ , _dcp_audio_channels (MAX_AUDIO_CHANNELS)
, _dirty (false)
{
set_dci_date_today ();
<< "_" << f.first << "_" << f.second
<< "_" << scaler()->id()
<< "_" << j2k_bandwidth()
- << "_" << boost::lexical_cast<int> (colour_lut());
+ << "_" << lexical_cast<int> (colour_lut());
if (ab()) {
pair<string, string> fa = Filter::ffmpeg_strings (Config::instance()->reference_filters());
xmlpp::Document doc;
xmlpp::Element* root = doc.create_root_node ("Metadata");
- root->add_child("Version")->add_child_text (boost::lexical_cast<string> (state_version));
+ root->add_child("Version")->add_child_text (lexical_cast<string> (state_version));
root->add_child("Name")->add_child_text (_name);
root->add_child("UseDCIName")->add_child_text (_use_dci_name ? "1" : "0");
root->add_child("Scaler")->add_child_text (_scaler->id ());
root->add_child("AB")->add_child_text (_ab ? "1" : "0");
root->add_child("WithSubtitles")->add_child_text (_with_subtitles ? "1" : "0");
- root->add_child("SubtitleOffset")->add_child_text (boost::lexical_cast<string> (_subtitle_offset));
- root->add_child("SubtitleScale")->add_child_text (boost::lexical_cast<string> (_subtitle_scale));
- root->add_child("ColourLUT")->add_child_text (boost::lexical_cast<string> (_colour_lut));
- root->add_child("J2KBandwidth")->add_child_text (boost::lexical_cast<string> (_j2k_bandwidth));
+ root->add_child("SubtitleOffset")->add_child_text (lexical_cast<string> (_subtitle_offset));
+ root->add_child("SubtitleScale")->add_child_text (lexical_cast<string> (_subtitle_scale));
+ root->add_child("ColourLUT")->add_child_text (lexical_cast<string> (_colour_lut));
+ root->add_child("J2KBandwidth")->add_child_text (lexical_cast<string> (_j2k_bandwidth));
_dci_metadata.as_xml (root->add_child ("DCIMetadata"));
- root->add_child("DCPVideoFrameRate")->add_child_text (boost::lexical_cast<string> (_dcp_video_frame_rate));
+ root->add_child("DCPVideoFrameRate")->add_child_text (lexical_cast<string> (_dcp_video_frame_rate));
root->add_child("DCIDate")->add_child_text (boost::gregorian::to_iso_string (_dci_date));
+ root->add_child("DCPAudioChannels")->add_child_text (lexical_cast<string> (_dcp_audio_channels));
_playlist->as_xml (root->add_child ("Playlist"));
doc.write_to_file_formatted (file ("metadata.xml"));
_dci_metadata = DCIMetadata (f.node_child ("DCIMetadata"));
_dcp_video_frame_rate = f.number_child<int> ("DCPVideoFrameRate");
_dci_date = boost::gregorian::from_undelimited_string (f.string_child ("DCIDate"));
+ _dcp_audio_channels = f.number_child<int> ("DCPAudioChannels");
_playlist->set_from_xml (shared_from_this(), f.node_child ("Playlist"));
boost::shared_ptr<Playlist> playlist () const;
OutputAudioFrame dcp_audio_frame_rate () const;
+ int dcp_audio_channels () const;
OutputAudioFrame time_to_audio_frames (Time) const;
OutputVideoFrame time_to_video_frames (Time) const;
return _dcp_video_frame_rate;
}
+ /** @return the number of audio channels that the DCP will have */
+ int dcp_audio_channels () const {
+ boost::mutex::scoped_lock lm (_state_mutex);
+ return _dcp_audio_channels;
+ }
+
/* SET */
void set_directory (std::string);
int _dcp_video_frame_rate;
/** The date that we should use in a DCI name */
boost::gregorian::date _dci_date;
+ int _dcp_audio_channels;
/** true if our state has changed since we last saved it */
mutable bool _dirty;
assert (film);
return film->dcp_audio_frame_rate ();
}
+
+/** @return the number of audio channels, which for NullContent is
+ *  the number of channels in the film's DCP.
+ */
+int
+NullContent::audio_channels () const
+{
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ return film->dcp_audio_channels ();
+}
+
return boost::shared_ptr<Content> ();
}
- int audio_channels () const {
- return MAX_AUDIO_CHANNELS;
- }
+ int audio_channels () const;
ContentAudioFrame audio_length () const {
return _audio_length;
, _subtitles (true)
, _have_valid_pieces (false)
, _position (0)
- , _audio_buffers (MAX_AUDIO_CHANNELS, 0)
+ , _audio_buffers (f->dcp_audio_channels(), 0)
, _next_audio (0)
{
_playlist->Changed.connect (bind (&Player::playlist_changed, this));
return;
}
- /* XXX: mapping */
-
/* The time of this audio may indicate that some of our buffered audio is not going to
be added to any more, so it can be emitted.
*/
if (time > _next_audio) {
/* We can emit some audio from our buffers */
- OutputAudioFrame const N = min (_film->time_to_audio_frames (time - _next_audio), static_cast<OutputAudioFrame> (_audio_buffers.frames()));
+ assert (_film->time_to_audio_frames (time - _next_audio) <= _audio_buffers.frames());
+ OutputAudioFrame const N = _film->time_to_audio_frames (time - _next_audio);
shared_ptr<AudioBuffers> emit (new AudioBuffers (_audio_buffers.channels(), N));
emit->copy_from (&_audio_buffers, N, 0, 0);
Audio (emit, _next_audio);
}
/* Now accumulate the new audio into our buffers */
- _audio_buffers.ensure_size (time - _next_audio + audio->frames());
- _audio_buffers.accumulate (audio.get(), 0, _film->time_to_audio_frames (time - _next_audio));
+ _audio_buffers.ensure_size (_audio_buffers.frames() + audio->frames());
+ _audio_buffers.accumulate_frames (audio.get(), 0, 0, audio->frames ());
}
/** @return true on error */
assert (film);
Time const this_time = min (_audio_content->length() - _next_audio, TIME_HZ / 2);
- shared_ptr<AudioBuffers> data (new AudioBuffers (MAX_AUDIO_CHANNELS, film->time_to_audio_frames (this_time)));
+ shared_ptr<AudioBuffers> data (new AudioBuffers (film->dcp_audio_channels(), film->time_to_audio_frames (this_time)));
data->make_silent ();
audio (data, _next_audio);
}
_film->dir (_film->dcp_name()),
_film->dcp_audio_mxf_filename (),
_film->dcp_video_frame_rate (),
- MAX_AUDIO_CHANNELS,
+ _film->dcp_audio_channels (),
_film->dcp_audio_frame_rate()
)
);