#include <sys/stat.h>
#include <sys/mman.h>
-#include <pbd/error.h>
+#include "pbd/error.h"
#include <glibmm/thread.h>
-#include <pbd/xml++.h>
-#include <pbd/memento_command.h>
-#include <pbd/enumwriter.h>
-#include <pbd/stacktrace.h>
-
-#include <ardour/ardour.h>
-#include <ardour/audioengine.h>
-#include <ardour/analyser.h>
-#include <ardour/audio_diskstream.h>
-#include <ardour/utils.h>
-#include <ardour/configuration.h>
-#include <ardour/audiofilesource.h>
-#include <ardour/send.h>
-#include <ardour/region_factory.h>
-#include <ardour/audioplaylist.h>
-#include <ardour/playlist_factory.h>
-#include <ardour/cycle_timer.h>
-#include <ardour/audioregion.h>
-#include <ardour/audio_port.h>
-#include <ardour/source_factory.h>
+#include "pbd/xml++.h"
+#include "pbd/memento_command.h"
+#include "pbd/enumwriter.h"
+#include "pbd/stacktrace.h"
+
+#include "ardour/ardour.h"
+#include "ardour/audioengine.h"
+#include "ardour/analyser.h"
+#include "ardour/audio_diskstream.h"
+#include "ardour/utils.h"
+#include "ardour/configuration.h"
+#include "ardour/audiofilesource.h"
+#include "ardour/send.h"
+#include "ardour/region_factory.h"
+#include "ardour/audioplaylist.h"
+#include "ardour/playlist_factory.h"
+#include "ardour/cycle_timer.h"
+#include "ardour/audioregion.h"
+#include "ardour/audio_port.h"
+#include "ardour/source_factory.h"
+#include "ardour/audio_buffer.h"
+#include "ardour/session.h"
+#include "ardour/io.h"
#include "i18n.h"
#include <locale.h>
_n_channels.set(DataType::AUDIO, c->size());
- if (_io->n_inputs().n_audio() > _n_channels.n_audio()) {
- add_channel_to (c, _io->n_inputs().n_audio() - _n_channels.n_audio());
- } else if (_io->n_inputs().n_audio() < _n_channels.n_audio()) {
- remove_channel_from (c, _n_channels.n_audio() - _io->n_inputs().n_audio());
+ if (_io->n_ports().n_audio() > _n_channels.n_audio()) {
+ add_channel_to (c, _io->n_ports().n_audio() - _n_channels.n_audio());
+ } else if (_io->n_ports().n_audio() < _n_channels.n_audio()) {
+ remove_channel_from (c, _n_channels.n_audio() - _io->n_ports().n_audio());
}
}
}
}
+void
+AudioDiskstream::non_realtime_locate (nframes_t location)
+{
+	/* Refill the channel buffers from a new transport position.
+	   At varispeed (anything other than +/-1.0) the seek target is
+	   scaled by the current speed; at exactly 1.0 or -1.0 we seek
+	   to the unscaled location.
+
+	   NOTE: the condition must be `&&`, not `||`. With `||` it is a
+	   tautology (always true), and at speed == -1.0 the expression
+	   `location * -1.0` goes negative and wraps when cast to the
+	   unsigned nframes_t, producing a garbage seek target.
+	*/
+
+	if (speed() != 1.0f && speed() != -1.0f) {
+		seek ((nframes_t) (location * (double) speed()));
+	} else {
+		seek (location);
+	}
+}
+
void
AudioDiskstream::get_input_sources ()
{
uint32_t n;
ChannelList::iterator chan;
- uint32_t ni = _io->n_inputs().n_audio();
+ uint32_t ni = _io->n_ports().n_audio();
vector<string> connections;
for (n = 0, chan = c->begin(); chan != c->end() && n < ni; ++chan, ++n) {
connections.clear ();
- if (_io->input(n)->get_connections (connections) == 0) {
+ if (_io->nth (n)->get_connections (connections) == 0) {
if ((*chan)->source) {
// _source->disable_metering ();
/* if per-track or global rec-enable turned on while the other was already on, we've started recording */
- if ((change & track_rec_enabled) && record_enabled() && (!(change & global_rec_enabled) && can_record) ||
+ if (((change & track_rec_enabled) && record_enabled() && (!(change & global_rec_enabled) && can_record)) ||
((change & global_rec_enabled) && can_record && (!(change & track_rec_enabled) && record_enabled()))) {
/* starting to record: compute first+last frames */
if (_alignment_style == ExistingMaterial) {
- if (!Config->get_punch_in()) {
+ if (!_session.config.get_punch_in()) {
/* manual punch in happens at the correct transport frame
because the user hit a button. but to get alignment correct
} else {
- if (Config->get_punch_in()) {
+ if (_session.config.get_punch_in()) {
first_recordable_frame += _roll_delay;
} else {
capture_start_frame -= _roll_delay;
}
int
-AudioDiskstream::process (nframes_t transport_frame, nframes_t nframes, nframes_t offset, bool can_record, bool rec_monitors_input)
+AudioDiskstream::process (nframes_t transport_frame, nframes_t nframes, bool can_record, bool rec_monitors_input)
{
uint32_t n;
boost::shared_ptr<ChannelList> c = channels.reader();
commit_should_unlock = false;
- if (!_io->active()) {
+ if (!_io || !_io->active()) {
_processed = true;
return 0;
}
(*chan)->current_playback_buffer = 0;
}
- if (nominally_recording || (_session.get_record_enabled() && Config->get_punch_in())) {
+ if (nominally_recording || (_session.get_record_enabled() && _session.config.get_punch_in())) {
OverlapType ot;
// Safeguard against situations where process() goes haywire when autopunching and last_recordable_frame < first_recordable_frame
if (nominally_recording || rec_nframes) {
- uint32_t limit = _io->n_inputs ().n_audio();
+ uint32_t limit = _io->n_ports ().n_audio();
/* one or more ports could already have been removed from _io, but our
channel setup hasn't yet been updated. prevent us from trying to
chaninfo->current_capture_buffer = chaninfo->capture_vector.buf[0];
- /* note: grab the entire port buffer, but only copy what we were supposed to for recording, and use
- rec_offset
+ /* note: grab the entire port buffer, but only copy what we were supposed to
+ for recording, and use rec_offset
*/
- AudioPort* const ap = _io->audio_input(n);
+ AudioPort* const ap = _io->audio (n);
assert(ap);
- assert(rec_nframes <= ap->get_audio_buffer().capacity());
- memcpy (chaninfo->current_capture_buffer, ap->get_audio_buffer().data(rec_nframes, offset + rec_offset), sizeof (Sample) * rec_nframes);
+ assert(rec_nframes <= ap->get_audio_buffer(nframes).capacity());
+ memcpy (chaninfo->current_capture_buffer, ap->get_audio_buffer (rec_nframes).data(rec_offset), sizeof (Sample) * rec_nframes);
+
} else {
goto out;
}
- AudioPort* const ap = _io->audio_input(n);
+ AudioPort* const ap = _io->audio (n);
assert(ap);
- Sample* buf = ap->get_audio_buffer().data(nframes, offset);
+ Sample* buf = ap->get_audio_buffer(nframes).data();
nframes_t first = chaninfo->capture_vector.len[0];
memcpy (chaninfo->capture_wrap_buffer, buf, sizeof (Sample) * first);
} else {
- memcpy ((char *) chaninfo->playback_wrap_buffer, chaninfo->playback_vector.buf[0],
- chaninfo->playback_vector.len[0] * sizeof (Sample));
- memcpy (chaninfo->playback_wrap_buffer + chaninfo->playback_vector.len[0], chaninfo->playback_vector.buf[1],
- (necessary_samples - chaninfo->playback_vector.len[0]) * sizeof (Sample));
+ memcpy ((char *) chaninfo->playback_wrap_buffer,
+ chaninfo->playback_vector.buf[0],
+ chaninfo->playback_vector.len[0] * sizeof (Sample));
+ memcpy (chaninfo->playback_wrap_buffer + chaninfo->playback_vector.len[0],
+ chaninfo->playback_vector.buf[1],
+ (necessary_samples - chaninfo->playback_vector.len[0])
+ * sizeof (Sample));
chaninfo->current_playback_buffer = chaninfo->playback_wrap_buffer;
}
}
if (rec_nframes == 0 && _actual_speed != 1.0f && _actual_speed != -1.0f) {
-
- uint64_t phase = last_phase;
- int64_t phi_delta;
- nframes_t i = 0;
-
- // Linearly interpolate into the alt buffer
- // using 40.24 fixp maths (swh)
-
- if (phi != target_phi) {
- phi_delta = ((int64_t)(target_phi - phi)) / nframes;
- } else {
- phi_delta = 0;
- }
-
- for (chan = c->begin(); chan != c->end(); ++chan) {
-
- float fr;
- ChannelInfo* chaninfo (*chan);
-
- i = 0;
- phase = last_phase;
-
- for (nframes_t outsample = 0; outsample < nframes; ++outsample) {
- i = phase >> 24;
- fr = (phase & 0xFFFFFF) / 16777216.0f;
- chaninfo->speed_buffer[outsample] =
- chaninfo->current_playback_buffer[i] * (1.0f - fr) +
- chaninfo->current_playback_buffer[i+1] * fr;
- phase += phi + phi_delta;
- }
-
- chaninfo->current_playback_buffer = chaninfo->speed_buffer;
- }
-
- playback_distance = i; // + 1;
- last_phase = (phase & 0xFFFFFF);
-
+ process_varispeed_playback(nframes, c);
} else {
playback_distance = nframes;
}
- phi = target_phi;
+ _speed = _target_speed;
}
return ret;
}
+void
+AudioDiskstream::process_varispeed_playback(nframes_t nframes, boost::shared_ptr<ChannelList> c)
+{
+	/* Resample each channel's current playback buffer into its
+	   speed_buffer using linear interpolation, for playback at a
+	   speed other than +/-1.0.
+
+	   Side effects visible in this code: repoints each channel's
+	   current_playback_buffer at its speed_buffer, sets the member
+	   playback_distance, and reads/writes phase state held by the
+	   `interpolation` member.
+	*/
+
+	ChannelList::iterator chan;
+
+	/*
+	interpolation.set_speed (_target_speed);
+
+	int channel = 0;
+	for (chan = c->begin(); chan != c->end(); ++chan, ++channel) {
+		ChannelInfo* chaninfo (*chan);
+
+		playback_distance = interpolation.interpolate (
+			channel, nframes, chaninfo->current_playback_buffer, chaninfo->speed_buffer);
+	}
+	*/
+
+	// the idea behind phase is that when the speed is not 1.0, we have to
+	// interpolate between samples and then we have to store where we thought we were.
+	// rather than being at sample N or N+1, we were at N+0.8792922
+	// so the "phase" element, if you want to think about this way,
+	// varies from 0 to 1, representing the "offset" between samples
+	uint64_t phase = interpolation.get_last_phase();
+
+	interpolation.set_speed (_target_speed);
+
+	// acceleration
+	uint64_t phi = interpolation.get_phi();
+	uint64_t target_phi = interpolation.get_target_phi();
+	int64_t phi_delta;
+
+	// index in the input buffers
+	nframes_t i = 0;
+
+	// Linearly interpolate into the speed buffer
+	// using 40.24 fixed point math
+	//
+	// Fixed point is just an integer with an implied scaling factor.
+	// In 40.24 the scaling factor is 2^24 = 16777216,
+	// so a value of 10*2^24 (in integer space) is equivalent to 10.0.
+	//
+	// The advantage is that addition and modulus [like x = (x + y) % 2^40]
+	// have no rounding errors and no drift, and just require a single integer add.
+	// (swh)
+
+	const int64_t fractional_part_mask = 0xFFFFFF;
+	const Sample binary_scaling_factor = 16777216.0f;
+
+	// phi = fixed point speed
+	if (phi != target_phi) {
+		// ramp the speed linearly toward the target over this cycle
+		phi_delta = ((int64_t)(target_phi - phi)) / nframes;
+	} else {
+		phi_delta = 0;
+	}
+
+	for (chan = c->begin(); chan != c->end(); ++chan) {
+
+		Sample fractional_phase_part;
+		ChannelInfo* chaninfo (*chan);
+
+		// every channel restarts from the same saved phase so that
+		// all channels stay sample-aligned with each other
+		i = 0;
+		phase = interpolation.get_last_phase();
+
+		for (nframes_t outsample = 0; outsample < nframes; ++outsample) {
+			// integer part of the phase = source sample index
+			i = phase >> 24;
+			fractional_phase_part = (phase & fractional_part_mask) / binary_scaling_factor;
+			// NOTE(review): reads current_playback_buffer[i+1] — presumably the
+			// playback buffer is over-allocated by at least one sample; confirm.
+			chaninfo->speed_buffer[outsample] =
+				chaninfo->current_playback_buffer[i] * (1.0f - fractional_phase_part) +
+				chaninfo->current_playback_buffer[i+1] * fractional_phase_part;
+			phase += phi + phi_delta;
+		}
+
+		chaninfo->current_playback_buffer = chaninfo->speed_buffer;
+	}
+
+	// NOTE(review): the commented-out "+ 1" suggests an off-by-one debate over
+	// how many source samples were consumed; left as-is to preserve behavior.
+	playback_distance = i; // + 1;
+	// persist only the fractional part of the phase for the next cycle
+	interpolation.set_last_phase (phase & fractional_part_mask);
+}
+
bool
AudioDiskstream::commit (nframes_t nframes)
{
bool need_butler = false;
- if (!_io->active()) {
+ if (!_io || !_io->active()) {
return false;
}
}
if (_slaved) {
- /*if (_io && _io->active()) {*/
+ if (_io && _io->active()) {
need_butler = c->front()->playback_buf->write_space() >= c->front()->playback_buf->bufsize() / 2;
- /*} else {
+ } else {
need_butler = false;
- }*/
+ }
} else {
- /*if (_io && _io->active()) {*/
+ if (_io && _io->active()) {
need_butler = c->front()->playback_buf->write_space() >= disk_io_chunk_frames
|| c->front()->capture_buf->read_space() >= disk_io_chunk_frames;
- /*} else {
+ } else {
need_butler = c->front()->capture_buf->read_space() >= disk_io_chunk_frames;
- }*/
+ }
}
if (commit_should_unlock) {
* written at all unless @a force_flush is true.
*/
int
-AudioDiskstream::do_flush (Session::RunContext context, bool force_flush)
+AudioDiskstream::do_flush (RunContext context, bool force_flush)
{
uint32_t to_write;
int32_t ret = 0;
*/
while (more_work && !err) {
- switch (do_flush (Session::TransportContext, true)) {
+ switch (do_flush (TransportContext, true)) {
case 0:
more_work = false;
break;
*/
try {
- boost::shared_ptr<Region> rx (RegionFactory::create (srcs, c->front()->write_source->last_capture_start_frame(), total_capture,
- whole_file_region_name,
- 0, AudioRegion::Flag (AudioRegion::DefaultFlags|AudioRegion::Automatic|AudioRegion::WholeFile)));
+ boost::shared_ptr<Region> rx (RegionFactory::create (srcs,
+ c->front()->write_source->last_capture_start_frame(), total_capture,
+ whole_file_region_name, 0,
+ Region::Flag (Region::DefaultFlags|Region::Automatic|Region::WholeFile)));
region = boost::dynamic_pointer_cast<AudioRegion> (rx);
region->special_set_position (capture_info.front()->start);
_last_capture_regions.push_back (region);
i_am_the_modifier++;
- _playlist->add_region (region, (*ci)->start);
+ _playlist->add_region (region, (*ci)->start, 1, non_layered());
i_am_the_modifier--;
buffer_position += (*ci)->frames;
void
AudioDiskstream::set_record_enabled (bool yn)
{
- if (!recordable() || !_session.record_enabling_legal() || _io->n_inputs().n_audio() == 0) {
+ if (!recordable() || !_session.record_enabling_legal() || _io->n_ports().n_audio() == 0) {
return;
}
for (ChannelList::iterator chan = c->begin(); chan != c->end(); ++chan) {
if ((*chan)->source) {
- (*chan)->source->ensure_monitor_input (!(Config->get_auto_input() && rolling));
+ (*chan)->source->ensure_monitor_input (!(_session.config.get_auto_input() && rolling));
}
capturing_sources.push_back ((*chan)->write_source);
(*chan)->write_source->mark_streaming_write_started ();
Location* pi;
- if (Config->get_punch_in() && ((pi = _session.locations()->auto_punch_location()) != 0)) {
+ if (_session.config.get_punch_in() && ((pi = _session.locations()->auto_punch_location()) != 0)) {
snprintf (buf, sizeof (buf), "%" PRIu32, pi->start());
} else {
snprintf (buf, sizeof (buf), "%" PRIu32, _session.transport_frame());
boost::shared_ptr<ChannelList> c = channels.reader();
for (ChannelList::iterator chan = c->begin(); chan != c->end(); ++chan) {
- if ((*chan)->speed_buffer) delete [] (*chan)->speed_buffer;
+ if ((*chan)->speed_buffer)
+ delete [] (*chan)->speed_buffer;
(*chan)->speed_buffer = new Sample[speed_buffer_size];
}
}
boost::shared_ptr<ChannelList> c = channels.reader();
for (ChannelList::iterator chan = c->begin(); chan != c->end(); ++chan) {
- if ((*chan)->playback_wrap_buffer) delete [] (*chan)->playback_wrap_buffer;
+ if ((*chan)->playback_wrap_buffer)
+ delete [] (*chan)->playback_wrap_buffer;
(*chan)->playback_wrap_buffer = new Sample[required_wrap_size];
- if ((*chan)->capture_wrap_buffer) delete [] (*chan)->capture_wrap_buffer;
+ if ((*chan)->capture_wrap_buffer)
+ delete [] (*chan)->capture_wrap_buffer;
(*chan)->capture_wrap_buffer = new Sample[required_wrap_size];
}
int
AudioDiskstream::add_channel_to (boost::shared_ptr<ChannelList> c, uint32_t how_many)
{
+
while (how_many--) {
- c->push_back (new ChannelInfo(_session.diskstream_buffer_size(), speed_buffer_size, wrap_buffer_size));
+ c->push_back (new ChannelInfo(_session.audio_diskstream_buffer_size(), speed_buffer_size, wrap_buffer_size));
+ interpolation.add_channel_to (_session.audio_diskstream_buffer_size(), speed_buffer_size);
}
_n_channels.set(DataType::AUDIO, c->size());
-
+
return 0;
}
AudioDiskstream::remove_channel_from (boost::shared_ptr<ChannelList> c, uint32_t how_many)
{
while (how_many-- && !c->empty()) {
- delete c->back();
+ // FIXME: crash (thread safe with RCU?)
+ // memory leak, when disabled.... :(
+ //delete c->back();
c->pop_back();
+ interpolation.remove_channel_from ();
}
_n_channels.set(DataType::AUDIO, c->size());
try {
fs = boost::dynamic_pointer_cast<AudioFileSource> (
- SourceFactory::createWritable (DataType::AUDIO, _session, prop->value(), false, _session.frame_rate()));
+ SourceFactory::createWritable (DataType::AUDIO, _session,
+ prop->value(), true,
+ false, _session.frame_rate()));
}
catch (failed_constructor& err) {
boost::shared_ptr<AudioRegion> region;
try {
- region = boost::dynamic_pointer_cast<AudioRegion> (RegionFactory::create (pending_sources, 0, first_fs->length(),
- region_name_from_path (first_fs->name(), true),
- 0, AudioRegion::Flag (AudioRegion::DefaultFlags|AudioRegion::Automatic|AudioRegion::WholeFile)));
+ region = boost::dynamic_pointer_cast<AudioRegion> (RegionFactory::create (
+ pending_sources, 0, first_fs->length(first_fs->timeline_position()),
+ region_name_from_path (first_fs->name(), true), 0,
+ Region::Flag (Region::DefaultFlags|Region::Automatic|Region::WholeFile)));
region->special_set_position (0);
}
catch (failed_constructor& err) {
- error << string_compose (_("%1: cannot create whole-file region from pending capture sources"),
- _name)
- << endmsg;
+ error << string_compose (
+ _("%1: cannot create whole-file region from pending capture sources"),
+ _name) << endmsg;
return -1;
}
try {
- region = boost::dynamic_pointer_cast<AudioRegion> (RegionFactory::create (pending_sources, 0, first_fs->length(), region_name_from_path (first_fs->name(), true)));
+ region = boost::dynamic_pointer_cast<AudioRegion> (RegionFactory::create (
+ pending_sources, 0, first_fs->length(first_fs->timeline_position()),
+ region_name_from_path (first_fs->name(), true)));
}
catch (failed_constructor& err) {
return 0;
}
+int
+AudioDiskstream::set_non_layered (bool yn)
+{
+	/* Set or clear the NonLayered flag on this diskstream. A no-op
+	   when the flag already matches the request; always reports
+	   success.
+	*/
+
+	if (yn == non_layered()) {
+		return 0;
+	}
+
+	_flags = yn ? Flag (_flags | NonLayered) : Flag (_flags & ~NonLayered);
+
+	return 0;
+}
+
int
AudioDiskstream::set_destructive (bool yn)
{
write_source.reset ();
}
- if (speed_buffer) {
- delete [] speed_buffer;
- speed_buffer = 0;
- }
+ delete [] speed_buffer;
+ speed_buffer = 0;
- if (playback_wrap_buffer) {
- delete [] playback_wrap_buffer;
- playback_wrap_buffer = 0;
- }
+ delete [] playback_wrap_buffer;
+ playback_wrap_buffer = 0;
- if (capture_wrap_buffer) {
- delete [] capture_wrap_buffer;
- capture_wrap_buffer = 0;
- }
+ delete [] capture_wrap_buffer;
+ capture_wrap_buffer = 0;
- if (playback_buf) {
- delete playback_buf;
- playback_buf = 0;
- }
+ delete playback_buf;
+ playback_buf = 0;
- if (capture_buf) {
- delete capture_buf;
- capture_buf = 0;
- }
+ delete capture_buf;
+ capture_buf = 0;
- if (capture_transition_buf) {
- delete capture_transition_buf;
- capture_transition_buf = 0;
- }
+ delete capture_transition_buf;
+ capture_transition_buf = 0;
}