X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;ds=sidebyside;f=libs%2Fardour%2Faudioregion.cc;h=0a33119ecf29b42e315084a9f821e1db83de2b3f;hb=b23d7d9170a12cbcd788e1c2cbcf688f376752ed;hp=e4c4b55a545defccc9fd7f0fe25039c41738d734;hpb=a017411dfab6d85fdbdcb9d4fd17a05f0ee2cc2a;p=ardour.git diff --git a/libs/ardour/audioregion.cc b/libs/ardour/audioregion.cc index e4c4b55a54..0a33119ecf 100644 --- a/libs/ardour/audioregion.cc +++ b/libs/ardour/audioregion.cc @@ -24,8 +24,10 @@ #include +#include +#include -#include +#include #include "pbd/basename.h" #include "pbd/xml++.h" @@ -36,17 +38,24 @@ #include "evoral/Curve.hpp" #include "ardour/audioregion.h" -#include "ardour/debug.h" #include "ardour/session.h" -#include "ardour/gain.h" #include "ardour/dB.h" +#include "ardour/debug.h" +#include "ardour/event_type_map.h" #include "ardour/playlist.h" #include "ardour/audiofilesource.h" #include "ardour/region_factory.h" #include "ardour/runtime_functions.h" #include "ardour/transient_detector.h" +#include "ardour/parameter_descriptor.h" +#include "ardour/progress.h" -#include "i18n.h" +#include "ardour/sndfilesource.h" +#ifdef HAVE_COREAUDIO +#include "ardour/coreaudiosource.h" +#endif // HAVE_COREAUDIO + +#include "pbd/i18n.h" #include using namespace std; @@ -61,6 +70,79 @@ namespace ARDOUR { PBD::PropertyDescriptor fade_in_active; PBD::PropertyDescriptor fade_out_active; PBD::PropertyDescriptor scale_amplitude; + PBD::PropertyDescriptor > fade_in; + PBD::PropertyDescriptor > inverse_fade_in; + PBD::PropertyDescriptor > fade_out; + PBD::PropertyDescriptor > inverse_fade_out; + PBD::PropertyDescriptor > envelope; + } +} + +/* Curve manipulations */ + +static void +reverse_curve (boost::shared_ptr dst, boost::shared_ptr src) +{ + size_t len = src->back()->when; + for (Evoral::ControlList::const_reverse_iterator it = src->rbegin(); it!=src->rend(); it++) { + dst->fast_simple_add (len - (*it)->when, (*it)->value); + } +} + +static void +generate_inverse_power_curve (boost::shared_ptr dst, boost::shared_ptr src) +{ + // calc inverse curve using sum of squares + for (Evoral::ControlList::const_iterator it = src->begin(); it!=src->end(); ++it ) { + float value = (*it)->value; + value = 1 - powf(value,2); + value = sqrtf(value); + dst->fast_simple_add ( (*it)->when, value ); + } +} + +static void +generate_db_fade (boost::shared_ptr dst, double len, int num_steps, float dB_drop) +{ + dst->clear (); + dst->fast_simple_add (0, 1); + + //generate a fade-out curve by successively applying a gain drop + float fade_speed = dB_to_coefficient(dB_drop / (float) num_steps); + float coeff = GAIN_COEFF_UNITY; + for (int i = 1; i < (num_steps-1); i++) { + coeff *= fade_speed; + dst->fast_simple_add (len*(double)i/(double)num_steps, coeff); + } + + dst->fast_simple_add (len, GAIN_COEFF_SMALL); +} + +static void +merge_curves (boost::shared_ptr dst, + boost::shared_ptr curve1, + boost::shared_ptr curve2) +{ + Evoral::ControlList::EventList::size_type size = curve1->size(); + + //curve lengths must match for now + if (size != curve2->size()) { + return; + } + + Evoral::ControlList::const_iterator c1 = curve1->begin(); + int count = 0; + for (Evoral::ControlList::const_iterator c2 = curve2->begin(); c2!=curve2->end(); c2++ ) { + float v1 = accurate_coefficient_to_dB((*c1)->value); + float v2 = accurate_coefficient_to_dB((*c2)->value); + + double interp = v1 * ( 1.0-( (double)count / (double)size) ); + interp += v2 * ( (double)count / (double)size ); + + interp = dB_to_coefficient(interp); + dst->fast_simple_add ( 
(*c1)->when, interp ); + c1++; + count++; } } @@ -68,17 +150,27 @@ void AudioRegion::make_property_quarks () { Properties::envelope_active.property_id = g_quark_from_static_string (X_("envelope-active")); - DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for envelope-active = %1\n", Properties::envelope_active.property_id)); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for envelope-active = %1\n", Properties::envelope_active.property_id)); Properties::default_fade_in.property_id = g_quark_from_static_string (X_("default-fade-in")); - DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-in = %1\n", Properties::default_fade_in.property_id)); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-in = %1\n", Properties::default_fade_in.property_id)); Properties::default_fade_out.property_id = g_quark_from_static_string (X_("default-fade-out")); - DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-out = %1\n", Properties::default_fade_out.property_id)); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-out = %1\n", Properties::default_fade_out.property_id)); Properties::fade_in_active.property_id = g_quark_from_static_string (X_("fade-in-active")); - DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-in-active = %1\n", Properties::fade_in_active.property_id)); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-in-active = %1\n", Properties::fade_in_active.property_id)); Properties::fade_out_active.property_id = g_quark_from_static_string (X_("fade-out-active")); - DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-out-active = %1\n", Properties::fade_out_active.property_id)); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-out-active = %1\n", Properties::fade_out_active.property_id)); Properties::scale_amplitude.property_id = g_quark_from_static_string (X_("scale-amplitude")); - DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for scale-amplitude = %1\n", Properties::scale_amplitude.property_id)); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for scale-amplitude = %1\n", Properties::scale_amplitude.property_id)); + Properties::fade_in.property_id = g_quark_from_static_string (X_("FadeIn")); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for FadeIn = %1\n", Properties::fade_in.property_id)); + Properties::inverse_fade_in.property_id = g_quark_from_static_string (X_("InverseFadeIn")); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for InverseFadeIn = %1\n", Properties::inverse_fade_in.property_id)); + Properties::fade_out.property_id = g_quark_from_static_string (X_("FadeOut")); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for FadeOut = %1\n", Properties::fade_out.property_id)); + Properties::inverse_fade_out.property_id = g_quark_from_static_string (X_("InverseFadeOut")); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for InverseFadeOut = %1\n", Properties::inverse_fade_out.property_id)); + Properties::envelope.property_id = g_quark_from_static_string (X_("Envelope")); + DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for Envelope = %1\n", Properties::envelope.property_id)); } void @@ -92,6 +184,11 @@ AudioRegion::register_properties () add_property (_fade_in_active); add_property (_fade_out_active); add_property (_scale_amplitude); + add_property (_fade_in); + add_property (_inverse_fade_in); + add_property (_fade_out); + add_property (_inverse_fade_out); + add_property 
(_envelope); } #define AUDIOREGION_STATE_DEFAULT \ @@ -100,15 +197,23 @@ AudioRegion::register_properties () , _default_fade_out (Properties::default_fade_out, true) \ , _fade_in_active (Properties::fade_in_active, true) \ , _fade_out_active (Properties::fade_out_active, true) \ - , _scale_amplitude (Properties::scale_amplitude, 1.0) - + , _scale_amplitude (Properties::scale_amplitude, 1.0) \ + , _fade_in (Properties::fade_in, boost::shared_ptr (new AutomationList (Evoral::Parameter (FadeInAutomation)))) \ + , _inverse_fade_in (Properties::inverse_fade_in, boost::shared_ptr (new AutomationList (Evoral::Parameter (FadeInAutomation)))) \ + , _fade_out (Properties::fade_out, boost::shared_ptr (new AutomationList (Evoral::Parameter (FadeOutAutomation)))) \ + , _inverse_fade_out (Properties::inverse_fade_out, boost::shared_ptr (new AutomationList (Evoral::Parameter (FadeOutAutomation)))) + #define AUDIOREGION_COPY_STATE(other) \ _envelope_active (Properties::envelope_active, other->_envelope_active) \ , _default_fade_in (Properties::default_fade_in, other->_default_fade_in) \ , _default_fade_out (Properties::default_fade_out, other->_default_fade_out) \ , _fade_in_active (Properties::fade_in_active, other->_fade_in_active) \ , _fade_out_active (Properties::fade_out_active, other->_fade_out_active) \ - , _scale_amplitude (Properties::scale_amplitude, other->_scale_amplitude) + , _scale_amplitude (Properties::scale_amplitude, other->_scale_amplitude) \ + , _fade_in (Properties::fade_in, boost::shared_ptr (new AutomationList (*other->_fade_in.val()))) \ + , _inverse_fade_in (Properties::fade_in, boost::shared_ptr (new AutomationList (*other->_inverse_fade_in.val()))) \ + , _fade_out (Properties::fade_in, boost::shared_ptr (new AutomationList (*other->_fade_out.val()))) \ + , _inverse_fade_out (Properties::fade_in, boost::shared_ptr (new AutomationList (*other->_inverse_fade_out.val()))) \ /* a Session will reset these to its chosen defaults by calling AudioRegion::set_default_fade() */ void @@ -130,10 +235,8 @@ AudioRegion::init () AudioRegion::AudioRegion (Session& s, framepos_t start, framecnt_t len, std::string name) : Region (s, start, len, name, DataType::AUDIO) , AUDIOREGION_STATE_DEFAULT + , _envelope (Properties::envelope, boost::shared_ptr (new AutomationList (Evoral::Parameter(EnvelopeAutomation)))) , _automatable (s) - , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation))) - , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation))) - , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation))) , _fade_in_suspended (0) , _fade_out_suspended (0) { @@ -145,10 +248,8 @@ AudioRegion::AudioRegion (Session& s, framepos_t start, framecnt_t len, std::str AudioRegion::AudioRegion (const SourceList& srcs) : Region (srcs) , AUDIOREGION_STATE_DEFAULT + , _envelope (Properties::envelope, boost::shared_ptr (new AutomationList (Evoral::Parameter(EnvelopeAutomation)))) , _automatable(srcs[0]->session()) - , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation))) - , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation))) - , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation))) , _fade_in_suspended (0) , _fade_out_suspended (0) { @@ -156,16 +257,36 @@ AudioRegion::AudioRegion (const SourceList& srcs) assert (_sources.size() == _master_sources.size()); } -AudioRegion::AudioRegion (boost::shared_ptr other, framecnt_t offset, bool offset_relative) - : Region (other, offset, offset_relative) +AudioRegion::AudioRegion 
(boost::shared_ptr other) + : Region (other) , AUDIOREGION_COPY_STATE (other) + /* As far as I can see, the _envelope's times are relative to region position, and have nothing + to do with sources (and hence _start). So when we copy the envelope, we just use the supplied offset. + */ + , _envelope (Properties::envelope, boost::shared_ptr (new AutomationList (*other->_envelope.val(), 0, other->_length))) , _automatable (other->session()) - , _fade_in (new AutomationList (*other->_fade_in)) - , _fade_out (new AutomationList (*other->_fade_out)) + , _fade_in_suspended (0) + , _fade_out_suspended (0) +{ + /* don't use init here, because we got fade in/out from the other region + */ + register_properties (); + listen_to_my_curves (); + connect_to_analysis_changed (); + connect_to_header_position_offset_changed (); + + assert(_type == DataType::AUDIO); + assert (_sources.size() == _master_sources.size()); +} + +AudioRegion::AudioRegion (boost::shared_ptr other, MusicFrame offset) + : Region (other, offset) + , AUDIOREGION_COPY_STATE (other) /* As far as I can see, the _envelope's times are relative to region position, and have nothing to do with sources (and hence _start). So when we copy the envelope, we just use the supplied offset. */ - , _envelope (new AutomationList (*other->_envelope, offset, other->_length)) + , _envelope (Properties::envelope, boost::shared_ptr (new AutomationList (*other->_envelope.val(), offset.frame, other->_length))) + , _automatable (other->session()) , _fade_in_suspended (0) , _fade_out_suspended (0) { @@ -183,10 +304,8 @@ AudioRegion::AudioRegion (boost::shared_ptr other, framecnt_t AudioRegion::AudioRegion (boost::shared_ptr other, const SourceList& srcs) : Region (boost::static_pointer_cast(other), srcs) , AUDIOREGION_COPY_STATE (other) + , _envelope (Properties::envelope, boost::shared_ptr (new AutomationList (*other->_envelope.val()))) , _automatable (other->session()) - , _fade_in (new AutomationList (*other->_fade_in)) - , _fade_out (new AutomationList (*other->_fade_out)) - , _envelope (new AutomationList (*other->_envelope)) , _fade_in_suspended (0) , _fade_out_suspended (0) { @@ -204,10 +323,8 @@ AudioRegion::AudioRegion (boost::shared_ptr other, const Sour AudioRegion::AudioRegion (SourceList& srcs) : Region (srcs) , AUDIOREGION_STATE_DEFAULT + , _envelope (Properties::envelope, boost::shared_ptr (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))) , _automatable(srcs[0]->session()) - , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation))) - , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation))) - , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation))) , _fade_in_suspended (0) , _fade_out_suspended (0) { @@ -222,7 +339,7 @@ AudioRegion::~AudioRegion () } void -AudioRegion::post_set () +AudioRegion::post_set (const PropertyChange& /*ignored*/) { if (!_sync_marked) { _sync_position = _start; @@ -233,7 +350,7 @@ AudioRegion::post_set () if (_left_of_split) { if (_fade_in->back()->when >= _length) { set_default_fade_in (); - } + } set_default_fade_out (); _left_of_split = false; } @@ -241,7 +358,7 @@ AudioRegion::post_set () if (_right_of_split) { if (_fade_out->back()->when >= _length) { set_default_fade_out (); - } + } set_default_fade_in (); _right_of_split = false; @@ -255,7 +372,7 @@ void AudioRegion::connect_to_analysis_changed () { for (SourceList::const_iterator i = _sources.begin(); i != _sources.end(); ++i) { - (*i)->AnalysisChanged.connect_same_thread (*this, boost::bind 
(&AudioRegion::invalidate_transients, this)); + (*i)->AnalysisChanged.connect_same_thread (*this, boost::bind (&AudioRegion::maybe_invalidate_transients, this)); } } @@ -266,8 +383,8 @@ AudioRegion::connect_to_header_position_offset_changed () for (SourceList::const_iterator i = _sources.begin(); i != _sources.end(); ++i) { - /* connect only once to HeaderPositionOffsetChanged, even if sources are replicated - */ + /* connect only once to HeaderPositionOffsetChanged, even if sources are replicated + */ if (unique_srcs.find (*i) == unique_srcs.end ()) { unique_srcs.insert (*i); @@ -296,243 +413,353 @@ AudioRegion::set_envelope_active (bool yn) } } -ARDOUR::nframes_t -AudioRegion::read_peaks (PeakData *buf, nframes_t npeaks, nframes_t offset, nframes_t cnt, uint32_t chan_n, double samples_per_unit) const +/** @param buf Buffer to put peak data in. + * @param npeaks Number of peaks to read (ie the number of PeakDatas in buf) + * @param offset Start position, as an offset from the start of this region's source. + * @param cnt Number of samples to read. + * @param chan_n Channel. + * @param frames_per_pixel Number of samples to use to generate one peak value. + */ + +ARDOUR::framecnt_t +AudioRegion::read_peaks (PeakData *buf, framecnt_t npeaks, framecnt_t offset, framecnt_t cnt, uint32_t chan_n, double frames_per_pixel) const { if (chan_n >= _sources.size()) { return 0; } - if (audio_source(chan_n)->read_peaks (buf, npeaks, offset, cnt, samples_per_unit)) { + if (audio_source(chan_n)->read_peaks (buf, npeaks, offset, cnt, frames_per_pixel)) { return 0; - } else { - if (_scale_amplitude != 1.0f) { - for (nframes_t n = 0; n < npeaks; ++n) { - buf[n].max *= _scale_amplitude; - buf[n].min *= _scale_amplitude; - } + } + + if (_scale_amplitude != 1.0f) { + for (framecnt_t n = 0; n < npeaks; ++n) { + buf[n].max *= _scale_amplitude; + buf[n].min *= _scale_amplitude; } - return cnt; } -} -framecnt_t -AudioRegion::read (Sample* buf, framepos_t timeline_position, framecnt_t cnt, int channel) const -{ - /* raw read, no fades, no gain, nada */ - return _read_at (_sources, _length, buf, 0, 0, _position + timeline_position, cnt, channel, 0, 0, ReadOps (0)); + return npeaks; } +/** @param buf Buffer to write data to (existing data will be overwritten). + * @param pos Position to read from as an offset from the region position. + * @param cnt Number of frames to read. + * @param channel Channel to read from. 
+ */ framecnt_t -AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer, - framepos_t file_position, framecnt_t cnt, uint32_t chan_n, - framecnt_t read_frames, framecnt_t skip_frames) const +AudioRegion::read (Sample* buf, framepos_t pos, framecnt_t cnt, int channel) const { - /* regular diskstream/butler read complete with fades etc */ - return _read_at (_sources, _length, buf, mixdown_buffer, gain_buffer, - file_position, cnt, chan_n, read_frames, skip_frames, ReadOps (~0)); + /* raw read, no fades, no gain, nada */ + return read_from_sources (_sources, _length, buf, _position + pos, cnt, channel); } framecnt_t -AudioRegion::master_read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer, +AudioRegion::master_read_at (Sample *buf, Sample* /*mixdown_buffer*/, float* /*gain_buffer*/, framepos_t position, framecnt_t cnt, uint32_t chan_n) const { /* do not read gain/scaling/fades and do not count this disk i/o in statistics */ - return _read_at (_master_sources, _master_sources.front()->length(_master_sources.front()->timeline_position()), - buf, mixdown_buffer, gain_buffer, position, cnt, chan_n, 0, 0, ReadOps (0)); + assert (cnt >= 0); + return read_from_sources ( + _master_sources, _master_sources.front()->length (_master_sources.front()->timeline_position()), + buf, position, cnt, chan_n + ); } +/** @param buf Buffer to mix data into. + * @param mixdown_buffer Scratch buffer for audio data. + * @param gain_buffer Scratch buffer for gain data. + * @param position Position within the session to read from. + * @param cnt Number of frames to read. + * @param chan_n Channel number to read. + */ framecnt_t -AudioRegion::_read_at (const SourceList& /*srcs*/, framecnt_t limit, - Sample *buf, Sample *mixdown_buffer, float *gain_buffer, - framepos_t position, - framecnt_t cnt, - uint32_t chan_n, - framecnt_t /*read_frames*/, - framecnt_t /*skip_frames*/, - ReadOps rops) const -{ - frameoffset_t internal_offset; - frameoffset_t buf_offset; - framecnt_t to_read; - bool raw = (rops == ReadOpsNone); +AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer, + framepos_t position, + framecnt_t cnt, + uint32_t chan_n) const +{ + /* We are reading data from this region into buf (possibly via mixdown_buffer). + The caller has verified that we cover the desired section. + */ - if (n_channels() == 0) { - return 0; - } + /* See doc/region_read.svg for a drawing which might help to explain + what is going on. 
+ */ - if (muted() && !raw) { - return 0; /* read nothing */ + assert (cnt >= 0); + + if (n_channels() == 0) { + return 0; } - /* precondition: caller has verified that we cover the desired section */ + /* WORK OUT WHERE TO GET DATA FROM */ - if (position < _position) { - internal_offset = 0; - buf_offset = _position - position; - cnt -= buf_offset; - } else { - internal_offset = position - _position; - buf_offset = 0; - } + framecnt_t to_read; - if (internal_offset >= limit) { + assert (position >= _position); + frameoffset_t const internal_offset = position - _position; + + if (internal_offset >= _length) { return 0; /* read nothing */ } - if ((to_read = min (cnt, limit - internal_offset)) == 0) { + if ((to_read = min (cnt, _length - internal_offset)) == 0) { return 0; /* read nothing */ } - if (opaque() || raw) { - /* overwrite whatever is there */ - mixdown_buffer = buf + buf_offset; - } else { - mixdown_buffer += buf_offset; - } - if (rops & ReadOpsCount) { - _read_data_count = 0; - } + /* COMPUTE DETAILS OF ANY FADES INVOLVED IN THIS READ */ - if (chan_n < n_channels()) { + /* Amount (length) of fade in that we are dealing with in this read */ + framecnt_t fade_in_limit = 0; - boost::shared_ptr src = audio_source(chan_n); - if (src->read (mixdown_buffer, _start + internal_offset, to_read) != to_read) { - return 0; /* "read nothing" */ - } + /* Offset from buf / mixdown_buffer of the start + of any fade out that we are dealing with + */ + frameoffset_t fade_out_offset = 0; - if (rops & ReadOpsCount) { - _read_data_count += src->read_data_count(); - } + /* Amount (length) of fade out that we are dealing with in this read */ + framecnt_t fade_out_limit = 0; - } else { + framecnt_t fade_interval_start = 0; - /* track is N-channel, this region has less channels; silence the ones - we don't have. - */ + /* Fade in */ - if (Config->get_replicate_missing_region_channels()) { - /* track is N-channel, this region has less channels, so use a relevant channel - */ - - uint32_t channel = n_channels() % chan_n; - boost::shared_ptr src = audio_source (channel); - - if (src->read (mixdown_buffer, _start + internal_offset, to_read) != to_read) { - return 0; /* "read nothing" */ - } - - /* adjust read data count appropriately since this was a duplicate read */ - src->dec_read_data_count (to_read); - } else { - memset (mixdown_buffer, 0, sizeof (Sample) * cnt); - } + if (_fade_in_active && _session.config.get_use_region_fades()) { + + framecnt_t fade_in_length = (framecnt_t) _fade_in->back()->when; + + /* see if this read is within the fade in */ + + if (internal_offset < fade_in_length) { + fade_in_limit = min (to_read, fade_in_length - internal_offset); + } } - if (rops & ReadOpsFades) { + /* Fade out */ - /* fade in */ + if (_fade_out_active && _session.config.get_use_region_fades()) { - if (_fade_in_active && _session.config.get_use_region_fades()) { + /* see if some part of this read is within the fade out */ - nframes_t fade_in_length = (nframes_t) _fade_in->back()->when; + /* ................. 
>| REGION + * _length + * + * { } FADE + * fade_out_length + * ^ + * _length - fade_out_length + * + * |--------------| + * ^internal_offset + * ^internal_offset + to_read + * + * we need the intersection of [internal_offset,internal_offset+to_read] with + * [_length - fade_out_length, _length] + * + */ - /* see if this read is within the fade in */ + fade_interval_start = max (internal_offset, _length - framecnt_t (_fade_out->back()->when)); + framecnt_t fade_interval_end = min(internal_offset + to_read, _length.val()); - if (internal_offset < fade_in_length) { + if (fade_interval_end > fade_interval_start) { + /* (part of the) the fade out is in this buffer */ + fade_out_limit = fade_interval_end - fade_interval_start; + fade_out_offset = fade_interval_start - internal_offset; + } + } - nframes_t fi_limit; + /* READ DATA FROM THE SOURCE INTO mixdown_buffer. + We can never read directly into buf, since it may contain data + from a region `below' this one in the stack, and our fades (if they exist) + may need to mix with the existing data. + */ - fi_limit = min (to_read, fade_in_length - internal_offset); + if (read_from_sources (_sources, _length, mixdown_buffer, position, to_read, chan_n) != to_read) { + return 0; + } + /* APPLY REGULAR GAIN CURVES AND SCALING TO mixdown_buffer */ - _fade_in->curve().get_vector (internal_offset, internal_offset+fi_limit, gain_buffer, fi_limit); + if (envelope_active()) { + _envelope->curve().get_vector (internal_offset, internal_offset + to_read, gain_buffer, to_read); - for (nframes_t n = 0; n < fi_limit; ++n) { - mixdown_buffer[n] *= gain_buffer[n]; - } + if (_scale_amplitude != 1.0f) { + for (framecnt_t n = 0; n < to_read; ++n) { + mixdown_buffer[n] *= gain_buffer[n] * _scale_amplitude; + } + } else { + for (framecnt_t n = 0; n < to_read; ++n) { + mixdown_buffer[n] *= gain_buffer[n]; } } + } else if (_scale_amplitude != 1.0f) { + apply_gain_to_buffer (mixdown_buffer, to_read, _scale_amplitude); + } - /* fade out */ + /* APPLY FADES TO THE DATA IN mixdown_buffer AND MIX THE RESULTS INTO + * buf. The key things to realize here: (1) the fade being applied is + * (as of April 26th 2012) just the inverse of the fade in curve (2) + * "buf" contains data from lower regions already. So this operation + * fades out the existing material. + */ - if (_fade_out_active && _session.config.get_use_region_fades()) { + if (fade_in_limit != 0) { - /* see if some part of this read is within the fade out */ + if (opaque()) { + if (_inverse_fade_in) { - /* ................. >| REGION - limit + /* explicit inverse fade in curve (e.g. for constant + * power), so we have to fetch it. 
+ */ - { } FADE - fade_out_length - ^ - limit - fade_out_length - |--------------| - ^internal_offset - ^internal_offset + to_read + _inverse_fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit); - we need the intersection of [internal_offset,internal_offset+to_read] with - [limit - fade_out_length, limit] - - */ + /* Fade the data from lower layers out */ + for (framecnt_t n = 0; n < fade_in_limit; ++n) { + buf[n] *= gain_buffer[n]; + } + /* refill gain buffer with the fade in */ - nframes_t fade_out_length = (nframes_t) _fade_out->back()->when; - nframes_t fade_interval_start = max(internal_offset, limit-fade_out_length); - nframes_t fade_interval_end = min(internal_offset + to_read, limit); + _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit); - if (fade_interval_end > fade_interval_start) { - /* (part of the) the fade out is in this buffer */ + } else { - nframes_t fo_limit = fade_interval_end - fade_interval_start; - nframes_t curve_offset = fade_interval_start - (limit-fade_out_length); - nframes_t fade_offset = fade_interval_start - internal_offset; + /* no explicit inverse fade in, so just use (1 - fade + * in) for the fade out of lower layers + */ - _fade_out->curve().get_vector (curve_offset, curve_offset+fo_limit, gain_buffer, fo_limit); + _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit); - for (nframes_t n = 0, m = fade_offset; n < fo_limit; ++n, ++m) { - mixdown_buffer[m] *= gain_buffer[n]; + for (framecnt_t n = 0; n < fade_in_limit; ++n) { + buf[n] *= 1 - gain_buffer[n]; } } + } else { + _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit); + } + /* Mix our newly-read data in, with the fade */ + for (framecnt_t n = 0; n < fade_in_limit; ++n) { + buf[n] += mixdown_buffer[n] * gain_buffer[n]; } } - /* Regular gain curves and scaling */ + if (fade_out_limit != 0) { - if ((rops & ReadOpsOwnAutomation) && envelope_active()) { - _envelope->curve().get_vector (internal_offset, internal_offset + to_read, gain_buffer, to_read); + framecnt_t const curve_offset = fade_interval_start - (_length - _fade_out->back()->when); - if ((rops & ReadOpsOwnScaling) && _scale_amplitude != 1.0f) { - for (nframes_t n = 0; n < to_read; ++n) { - mixdown_buffer[n] *= gain_buffer[n] * _scale_amplitude; + if (opaque()) { + if (_inverse_fade_out) { + + _inverse_fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit); + + /* Fade the data from lower levels in */ + for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) { + buf[m] *= gain_buffer[n]; + } + + /* fetch the actual fade out */ + + _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit); + + } else { + + /* no explicit inverse fade out (which is + * actually a fade in), so just use (1 - fade + * out) for the fade in of lower layers + */ + + _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit); + + for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) { + buf[m] *= 1 - gain_buffer[n]; + } } } else { - for (nframes_t n = 0; n < to_read; ++n) { - mixdown_buffer[n] *= gain_buffer[n]; - } + _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit); } - } else if ((rops & ReadOpsOwnScaling) && 
_scale_amplitude != 1.0f) { - // XXX this should be using what in 2.0 would have been: - // Session::apply_gain_to_buffer (mixdown_buffer, to_read, _scale_amplitude); + /* Mix our newly-read data with whatever was already there, + with the fade out applied to our data. + */ + for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) { + buf[m] += mixdown_buffer[m] * gain_buffer[n]; + } + } + + /* MIX OR COPY THE REGION BODY FROM mixdown_buffer INTO buf */ - for (nframes_t n = 0; n < to_read; ++n) { - mixdown_buffer[n] *= _scale_amplitude; + framecnt_t const N = to_read - fade_in_limit - fade_out_limit; + if (N > 0) { + if (opaque ()) { + DEBUG_TRACE (DEBUG::AudioPlayback, string_compose ("Region %1 memcpy into buf @ %2 + %3, from mixdown buffer @ %4 + %5, len = %6 cnt was %7\n", + name(), buf, fade_in_limit, mixdown_buffer, fade_in_limit, N, cnt)); + memcpy (buf + fade_in_limit, mixdown_buffer + fade_in_limit, N * sizeof (Sample)); + } else { + mix_buffers_no_gain (buf + fade_in_limit, mixdown_buffer + fade_in_limit, N); } } - if (!opaque() && (buf != mixdown_buffer)) { + return to_read; +} - /* gack. the things we do for users. - */ +/** Read data directly from one of our sources, accounting for the situation when the track has a different channel + * count to the region. + * + * @param srcs Source list to get our source from. + * @param limit Furthest that we should read, as an offset from the region position. + * @param buf Buffer to write data into (existing contents of the buffer will be overwritten) + * @param position Position to read from, in session frames. + * @param cnt Number of frames to read. + * @param chan_n Channel to read from. + * @return Number of frames read. + */ + +framecnt_t +AudioRegion::read_from_sources (SourceList const & srcs, framecnt_t limit, Sample* buf, framepos_t position, framecnt_t cnt, uint32_t chan_n) const +{ + frameoffset_t const internal_offset = position - _position; + if (internal_offset >= limit) { + return 0; + } + + framecnt_t const to_read = min (cnt, limit - internal_offset); + if (to_read == 0) { + return 0; + } + + if (chan_n < n_channels()) { + + boost::shared_ptr src = boost::dynamic_pointer_cast (srcs[chan_n]); + if (src->read (buf, _start + internal_offset, to_read) != to_read) { + return 0; /* "read nothing" */ + } - buf += buf_offset; + } else { + + /* track is N-channel, this region has fewer channels; silence the ones + we don't have. 
+ */ + + if (Config->get_replicate_missing_region_channels()) { + + /* copy an existing channel's data in for this non-existant one */ + + uint32_t channel = chan_n % n_channels(); + boost::shared_ptr src = boost::dynamic_pointer_cast (srcs[channel]); - for (nframes_t n = 0; n < to_read; ++n) { - buf[n] += mixdown_buffer[n]; + if (src->read (buf, _start + internal_offset, to_read) != to_read) { + return 0; /* "read nothing" */ + } + + } else { + + /* use silence */ + memset (buf, 0, sizeof (Sample) * to_read); } } @@ -540,35 +767,40 @@ AudioRegion::_read_at (const SourceList& /*srcs*/, framecnt_t limit, } XMLNode& -AudioRegion::state () +AudioRegion::get_basic_state () { XMLNode& node (Region::state ()); - XMLNode *child; - char buf[64]; - LocaleGuard lg (X_("POSIX")); + LocaleGuard lg; - snprintf (buf, sizeof (buf), "%u", (uint32_t) _sources.size()); - node.add_property ("channels", buf); + node.set_property ("channels", (uint32_t)_sources.size()); - Stateful::add_properties (node); + return node; +} + +XMLNode& +AudioRegion::state () +{ + XMLNode& node (get_basic_state()); + XMLNode *child; + LocaleGuard lg; child = node.add_child ("Envelope"); bool default_env = false; - + // If there are only two points, the points are in the start of the region and the end of the region // so, if they are both at 1.0f, that means the default region. - + if (_envelope->size() == 2 && - _envelope->front()->value == 1.0f && - _envelope->back()->value==1.0f) { + _envelope->front()->value == GAIN_COEFF_UNITY && + _envelope->back()->value==GAIN_COEFF_UNITY) { if (_envelope->front()->when == 0 && _envelope->back()->when == _length) { default_env = true; } } - + if (default_env) { - child->add_property ("default", "yes"); + child->set_property ("default", "yes"); } else { child->add_child_nocopy (_envelope->get_state ()); } @@ -576,19 +808,29 @@ AudioRegion::state () child = node.add_child (X_("FadeIn")); if (_default_fade_in) { - child->add_property ("default", "yes"); + child->set_property ("default", "yes"); } else { child->add_child_nocopy (_fade_in->get_state ()); } + if (_inverse_fade_in) { + child = node.add_child (X_("InverseFadeIn")); + child->add_child_nocopy (_inverse_fade_in->get_state ()); + } + child = node.add_child (X_("FadeOut")); if (_default_fade_out) { - child->add_property ("default", "yes"); + child->set_property ("default", "yes"); } else { child->add_child_nocopy (_fade_out->get_state ()); } + if (_inverse_fade_out) { + child = node.add_child (X_("InverseFadeOut")); + child->add_child_nocopy (_inverse_fade_out->get_state ()); + } + return node; } @@ -596,9 +838,8 @@ int AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_changed, bool send) { const XMLNodeList& nlist = node.children(); - const XMLProperty *prop; - LocaleGuard lg (X_("POSIX")); - boost::shared_ptr the_playlist (_playlist.lock()); + LocaleGuard lg; + boost::shared_ptr the_playlist (_playlist.lock()); suspend_property_changes (); @@ -613,21 +854,21 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_ Region::_set_state (node, version, what_changed, false); - if ((prop = node.property ("scale-gain")) != 0) { - float a = atof (prop->value().c_str()); - if (a != _scale_amplitude) { - _scale_amplitude = a; + float val; + if (node.get_property ("scale-gain", val)) { + if (val != _scale_amplitude) { + _scale_amplitude = val; what_changed.add (Properties::scale_amplitude); } } /* Now find envelope description and other related child items */ - _envelope->freeze (); + 
_envelope->freeze (); for (XMLNodeConstIterator niter = nlist.begin(); niter != nlist.end(); ++niter) { XMLNode *child; - XMLProperty *prop; + XMLProperty const * prop; child = (*niter); @@ -639,7 +880,6 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_ set_default_envelope (); } - _envelope->set_max_xval (_length); _envelope->truncate_end (_length); @@ -647,7 +887,8 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_ _fade_in->clear (); - if ((prop = child->property ("default")) != 0 || (prop = child->property ("steepness")) != 0) { + bool is_default; + if ((child->get_property ("default", is_default) && is_default) || (prop = child->property ("steepness")) != 0) { set_default_fade_in (); } else { XMLNode* grandchild = child->child ("AutomationList"); @@ -656,19 +897,17 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_ } } - if ((prop = child->property ("active")) != 0) { - if (string_is_affirmative (prop->value())) { - set_fade_in_active (true); - } else { - set_fade_in_active (false); - } + bool is_active; + if (child->get_property ("active", is_active)) { + set_fade_in_active (is_active); } } else if (child->name() == "FadeOut") { _fade_out->clear (); - if ((prop = child->property ("default")) != 0 || (prop = child->property ("steepness")) != 0) { + bool is_default; + if ((child->get_property ("default", is_default) && is_default) || (prop = child->property ("steepness")) != 0) { set_default_fade_out (); } else { XMLNode* grandchild = child->child ("AutomationList"); @@ -677,20 +916,27 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_ } } - if ((prop = child->property ("active")) != 0) { - if (string_is_affirmative (prop->value())) { - set_fade_out_active (true); - } else { - set_fade_out_active (false); - } + bool is_active; + if (child->get_property ("active", is_active)) { + set_fade_out_active (is_active); } + } else if ( (child->name() == "InverseFadeIn") || (child->name() == "InvFadeIn") ) { + XMLNode* grandchild = child->child ("AutomationList"); + if (grandchild) { + _inverse_fade_in->set_state (*grandchild, version); + } + } else if ( (child->name() == "InverseFadeOut") || (child->name() == "InvFadeOut") ) { + XMLNode* grandchild = child->child ("AutomationList"); + if (grandchild) { + _inverse_fade_out->set_state (*grandchild, version); + } } } - _envelope->thaw (); + _envelope->thaw (); resume_property_changes (); - + if (send) { send_change (what_changed); } @@ -709,150 +955,207 @@ AudioRegion::set_state (const XMLNode& node, int version) return _set_state (node, version, what_changed, true); } +void +AudioRegion::fade_range (framepos_t start, framepos_t end) +{ + framepos_t s, e; + + switch (coverage (start, end)) { + case Evoral::OverlapStart: + trim_front(start); + s = _position; + e = end; + set_fade_in (FadeConstantPower, e - s); + break; + case Evoral::OverlapEnd: + trim_end(end); + s = start; + e = _position + _length; + set_fade_out (FadeConstantPower, e - s); + break; + case Evoral::OverlapInternal: + /* needs addressing, perhaps. 
Difficult to do if we can't + * control one edge of the fade relative to the relevant edge + * of the region, which we cannot - fades are currently assumed + * to start/end at the start/end of the region + */ + break; + default: + return; + } +} + void AudioRegion::set_fade_in_shape (FadeShape shape) { - set_fade_in (shape, (nframes_t) _fade_in->back()->when); + set_fade_in (shape, (framecnt_t) _fade_in->back()->when); } void AudioRegion::set_fade_out_shape (FadeShape shape) { - set_fade_out (shape, (nframes_t) _fade_out->back()->when); + set_fade_out (shape, (framecnt_t) _fade_out->back()->when); } void AudioRegion::set_fade_in (boost::shared_ptr f) { - _fade_in->freeze (); - *_fade_in = *f; - _fade_in->thaw (); - + _fade_in->freeze (); + *(_fade_in.val()) = *f; + _fade_in->thaw (); + _default_fade_in = false; + send_change (PropertyChange (Properties::fade_in)); } void AudioRegion::set_fade_in (FadeShape shape, framecnt_t len) { - _fade_in->freeze (); + const ARDOUR::ParameterDescriptor desc(FadeInAutomation); + boost::shared_ptr c1 (new Evoral::ControlList (FadeInAutomation, desc)); + boost::shared_ptr c2 (new Evoral::ControlList (FadeInAutomation, desc)); + boost::shared_ptr c3 (new Evoral::ControlList (FadeInAutomation, desc)); + + _fade_in->freeze (); _fade_in->clear (); + _inverse_fade_in->clear (); + + const int num_steps = 32; switch (shape) { case FadeLinear: - _fade_in->fast_simple_add (0.0, 0.0); - _fade_in->fast_simple_add (len, 1.0); + _fade_in->fast_simple_add (0.0, GAIN_COEFF_SMALL); + _fade_in->fast_simple_add (len, GAIN_COEFF_UNITY); + reverse_curve (_inverse_fade_in.val(), _fade_in.val()); break; case FadeFast: - _fade_in->fast_simple_add (0, 0); - _fade_in->fast_simple_add (len * 0.389401, 0.0333333); - _fade_in->fast_simple_add (len * 0.629032, 0.0861111); - _fade_in->fast_simple_add (len * 0.829493, 0.233333); - _fade_in->fast_simple_add (len * 0.9447, 0.483333); - _fade_in->fast_simple_add (len * 0.976959, 0.697222); - _fade_in->fast_simple_add (len, 1); + generate_db_fade (_fade_in.val(), len, num_steps, -60); + reverse_curve (c1, _fade_in.val()); + _fade_in->copy_events (*c1); + generate_inverse_power_curve (_inverse_fade_in.val(), _fade_in.val()); break; case FadeSlow: - _fade_in->fast_simple_add (0, 0); - _fade_in->fast_simple_add (len * 0.0207373, 0.197222); - _fade_in->fast_simple_add (len * 0.0645161, 0.525); - _fade_in->fast_simple_add (len * 0.152074, 0.802778); - _fade_in->fast_simple_add (len * 0.276498, 0.919444); - _fade_in->fast_simple_add (len * 0.481567, 0.980556); - _fade_in->fast_simple_add (len * 0.767281, 1); - _fade_in->fast_simple_add (len, 1); + generate_db_fade (c1, len, num_steps, -1); // start off with a slow fade + generate_db_fade (c2, len, num_steps, -80); // end with a fast fade + merge_curves (_fade_in.val(), c1, c2); + reverse_curve (c3, _fade_in.val()); + _fade_in->copy_events (*c3); + generate_inverse_power_curve (_inverse_fade_in.val(), _fade_in.val()); break; - case FadeLogA: - _fade_in->fast_simple_add (0, 0); - _fade_in->fast_simple_add (len * 0.0737327, 0.308333); - _fade_in->fast_simple_add (len * 0.246544, 0.658333); - _fade_in->fast_simple_add (len * 0.470046, 0.886111); - _fade_in->fast_simple_add (len * 0.652074, 0.972222); - _fade_in->fast_simple_add (len * 0.771889, 0.988889); - _fade_in->fast_simple_add (len, 1); + case FadeConstantPower: + _fade_in->fast_simple_add (0.0, GAIN_COEFF_SMALL); + for (int i = 1; i < num_steps; ++i) { + const float dist = i / (num_steps + 1.f); + _fade_in->fast_simple_add (len * dist, sin 
(dist * M_PI / 2.0)); + } + _fade_in->fast_simple_add (len, GAIN_COEFF_UNITY); + reverse_curve (_inverse_fade_in.val(), _fade_in.val()); break; - case FadeLogB: - _fade_in->fast_simple_add (0, 0); - _fade_in->fast_simple_add (len * 0.304147, 0.0694444); - _fade_in->fast_simple_add (len * 0.529954, 0.152778); - _fade_in->fast_simple_add (len * 0.725806, 0.333333); - _fade_in->fast_simple_add (len * 0.847926, 0.558333); - _fade_in->fast_simple_add (len * 0.919355, 0.730556); - _fade_in->fast_simple_add (len, 1); + case FadeSymmetric: + //start with a nearly linear cuve + _fade_in->fast_simple_add (0, 1); + _fade_in->fast_simple_add (0.5 * len, 0.6); + //now generate a fade-out curve by successively applying a gain drop + const double breakpoint = 0.7; //linear for first 70% + for (int i = 2; i < 9; ++i) { + const float coeff = (1.f - breakpoint) * powf (0.5, i); + _fade_in->fast_simple_add (len * (breakpoint + ((GAIN_COEFF_UNITY - breakpoint) * (double)i / 9.0)), coeff); + } + _fade_in->fast_simple_add (len, GAIN_COEFF_SMALL); + reverse_curve (c3, _fade_in.val()); + _fade_in->copy_events (*c3); + reverse_curve (_inverse_fade_in.val(), _fade_in.val()); break; } - _fade_in->thaw (); + _fade_in->set_interpolation(Evoral::ControlList::Curved); + _inverse_fade_in->set_interpolation(Evoral::ControlList::Curved); + + _default_fade_in = false; + _fade_in->thaw (); send_change (PropertyChange (Properties::fade_in)); } void AudioRegion::set_fade_out (boost::shared_ptr f) { - _fade_out->freeze (); - *_fade_out = *f; - _fade_out->thaw (); + _fade_out->freeze (); + *(_fade_out.val()) = *f; + _fade_out->thaw (); + _default_fade_out = false; - send_change (PropertyChange (Properties::fade_in)); + send_change (PropertyChange (Properties::fade_out)); } void AudioRegion::set_fade_out (FadeShape shape, framecnt_t len) { - _fade_out->freeze (); + const ARDOUR::ParameterDescriptor desc(FadeOutAutomation); + boost::shared_ptr c1 (new Evoral::ControlList (FadeOutAutomation, desc)); + boost::shared_ptr c2 (new Evoral::ControlList (FadeOutAutomation, desc)); + + _fade_out->freeze (); _fade_out->clear (); + _inverse_fade_out->clear (); + + const int num_steps = 32; switch (shape) { - case FadeFast: - _fade_out->fast_simple_add (len * 0, 1); - _fade_out->fast_simple_add (len * 0.023041, 0.697222); - _fade_out->fast_simple_add (len * 0.0553, 0.483333); - _fade_out->fast_simple_add (len * 0.170507, 0.233333); - _fade_out->fast_simple_add (len * 0.370968, 0.0861111); - _fade_out->fast_simple_add (len * 0.610599, 0.0333333); - _fade_out->fast_simple_add (len * 1, 0); + case FadeLinear: + _fade_out->fast_simple_add (0.0, GAIN_COEFF_UNITY); + _fade_out->fast_simple_add (len, GAIN_COEFF_SMALL); + reverse_curve (_inverse_fade_out.val(), _fade_out.val()); break; - case FadeLogA: - _fade_out->fast_simple_add (len * 0, 1); - _fade_out->fast_simple_add (len * 0.228111, 0.988889); - _fade_out->fast_simple_add (len * 0.347926, 0.972222); - _fade_out->fast_simple_add (len * 0.529954, 0.886111); - _fade_out->fast_simple_add (len * 0.753456, 0.658333); - _fade_out->fast_simple_add (len * 0.9262673, 0.308333); - _fade_out->fast_simple_add (len * 1, 0); + case FadeFast: + generate_db_fade (_fade_out.val(), len, num_steps, -60); + generate_inverse_power_curve (_inverse_fade_out.val(), _fade_out.val()); break; case FadeSlow: - _fade_out->fast_simple_add (len * 0, 1); - _fade_out->fast_simple_add (len * 0.305556, 1); - _fade_out->fast_simple_add (len * 0.548611, 0.991736); - _fade_out->fast_simple_add (len * 0.759259, 0.931129); - 
_fade_out->fast_simple_add (len * 0.918981, 0.68595); - _fade_out->fast_simple_add (len * 0.976852, 0.22865); - _fade_out->fast_simple_add (len * 1, 0); + generate_db_fade (c1, len, num_steps, -1); //start off with a slow fade + generate_db_fade (c2, len, num_steps, -80); //end with a fast fade + merge_curves (_fade_out.val(), c1, c2); + generate_inverse_power_curve (_inverse_fade_out.val(), _fade_out.val()); break; - case FadeLogB: - _fade_out->fast_simple_add (len * 0, 1); - _fade_out->fast_simple_add (len * 0.080645, 0.730556); - _fade_out->fast_simple_add (len * 0.277778, 0.289256); - _fade_out->fast_simple_add (len * 0.470046, 0.152778); - _fade_out->fast_simple_add (len * 0.695853, 0.0694444); - _fade_out->fast_simple_add (len * 1, 0); + case FadeConstantPower: + //constant-power fades use a sin/cos relationship + //the cutoff is abrupt but it has the benefit of being symmetrical + _fade_out->fast_simple_add (0.0, GAIN_COEFF_UNITY); + for (int i = 1; i < num_steps; ++i) { + const float dist = i / (num_steps + 1.f); + _fade_out->fast_simple_add (len * dist, cos (dist * M_PI / 2.0)); + } + _fade_out->fast_simple_add (len, GAIN_COEFF_SMALL); + reverse_curve (_inverse_fade_out.val(), _fade_out.val()); break; - case FadeLinear: - _fade_out->fast_simple_add (len * 0, 1); - _fade_out->fast_simple_add (len * 1, 0); + case FadeSymmetric: + //start with a nearly linear cuve + _fade_out->fast_simple_add (0, 1); + _fade_out->fast_simple_add (0.5 * len, 0.6); + //now generate a fade-out curve by successively applying a gain drop + const double breakpoint = 0.7; //linear for first 70% + for (int i = 2; i < 9; ++i) { + const float coeff = (1.f - breakpoint) * powf (0.5, i); + _fade_out->fast_simple_add (len * (breakpoint + ((GAIN_COEFF_UNITY - breakpoint) * (double)i / 9.0)), coeff); + } + _fade_out->fast_simple_add (len, GAIN_COEFF_SMALL); + reverse_curve (_inverse_fade_out.val(), _fade_out.val()); break; } - _fade_out->thaw (); - send_change (PropertyChange (Properties::fade_in)); + _fade_out->set_interpolation(Evoral::ControlList::Curved); + _inverse_fade_out->set_interpolation(Evoral::ControlList::Curved); + + _default_fade_out = false; + _fade_out->thaw (); + send_change (PropertyChange (Properties::fade_out)); } void @@ -862,9 +1165,17 @@ AudioRegion::set_fade_in_length (framecnt_t len) len = _length - 1; } + if (len < 64) { + len = 64; + } + bool changed = _fade_in->extend_to (len); if (changed) { + if (_inverse_fade_in) { + _inverse_fade_in->extend_to (len); + } + _default_fade_in = false; send_change (PropertyChange (Properties::fade_in)); } @@ -877,10 +1188,19 @@ AudioRegion::set_fade_out_length (framecnt_t len) len = _length - 1; } + if (len < 64) { + len = 64; + } + bool changed = _fade_out->extend_to (len); if (changed) { + + if (_inverse_fade_out) { + _inverse_fade_out->extend_to (len); + } _default_fade_out = false; + send_change (PropertyChange (Properties::fade_out)); } } @@ -922,14 +1242,14 @@ void AudioRegion::set_default_fade_in () { _fade_in_suspended = 0; - set_fade_in (FadeLinear, 64); + set_fade_in (Config->get_default_fade_shape(), 64); } void AudioRegion::set_default_fade_out () { _fade_out_suspended = 0; - set_fade_out (FadeLinear, 64); + set_fade_out (Config->get_default_fade_shape(), 64); } void @@ -942,11 +1262,11 @@ AudioRegion::set_default_fades () void AudioRegion::set_default_envelope () { - _envelope->freeze (); + _envelope->freeze (); _envelope->clear (); - _envelope->fast_simple_add (0, 1.0f); - _envelope->fast_simple_add (_length, 1.0f); - _envelope->thaw (); + 
_envelope->fast_simple_add (0, GAIN_COEFF_UNITY); + _envelope->fast_simple_add (_length, GAIN_COEFF_UNITY); + _envelope->thaw (); } void @@ -956,26 +1276,25 @@ AudioRegion::recompute_at_end () based on the the existing curve. */ - _envelope->freeze (); + _envelope->freeze (); _envelope->truncate_end (_length); - _envelope->set_max_xval (_length); - _envelope->thaw (); - + _envelope->thaw (); + suspend_property_changes(); - if (_left_of_split) { - set_default_fade_out (); - _left_of_split = false; - } else if (_fade_out->back()->when > _length) { - _fade_out->extend_to (_length); - send_change (PropertyChange (Properties::fade_out)); - } - + if (_left_of_split) { + set_default_fade_out (); + _left_of_split = false; + } else if (_fade_out->back()->when > _length) { + _fade_out->extend_to (_length); + send_change (PropertyChange (Properties::fade_out)); + } + if (_fade_in->back()->when > _length) { _fade_in->extend_to (_length); send_change (PropertyChange (Properties::fade_in)); } - + resume_property_changes(); } @@ -985,13 +1304,13 @@ AudioRegion::recompute_at_start () /* as above, but the shift was from the front */ _envelope->truncate_start (_length); - + suspend_property_changes(); - if (_right_of_split) { - set_default_fade_in (); - _right_of_split = false; - } else if (_fade_in->back()->when > _length) { + if (_right_of_split) { + set_default_fade_in (); + _right_of_split = false; + } else if (_fade_in->back()->when > _length) { _fade_in->extend_to (_length); send_change (PropertyChange (Properties::fade_in)); } @@ -1000,7 +1319,7 @@ AudioRegion::recompute_at_start () _fade_out->extend_to (_length); send_change (PropertyChange (Properties::fade_out)); } - + resume_property_changes(); } @@ -1037,11 +1356,11 @@ AudioRegion::separate_by_channel (Session& /*session*/, vectorset_whole_file (false); @@ -1055,73 +1374,7 @@ AudioRegion::separate_by_channel (Session& /*session*/, vectorread (buf, pos, cnt, channel); -} - -int -AudioRegion::exportme (Session& /*session*/, ARDOUR::ExportSpecification& /*spec*/) -{ - // TODO EXPORT -// const nframes_t blocksize = 4096; -// nframes_t to_read; -// int status = -1; -// -// spec.channels = _sources.size(); -// -// if (spec.prepare (blocksize, session.frame_rate())) { -// goto out; -// } -// -// spec.pos = 0; -// spec.total_frames = _length; -// -// while (spec.pos < _length && !spec.stop) { -// -// -// /* step 1: interleave */ -// -// to_read = min (_length - spec.pos, blocksize); -// -// if (spec.channels == 1) { -// -// if (read_raw_internal (spec.dataF, _start + spec.pos, to_read) != to_read) { -// goto out; -// } -// -// } else { -// -// Sample buf[blocksize]; -// -// for (uint32_t chan = 0; chan < spec.channels; ++chan) { -// -// if (audio_source(chan)->read (buf, _start + spec.pos, to_read) != to_read) { -// goto out; -// } -// -// for (nframes_t x = 0; x < to_read; ++x) { -// spec.dataF[chan+(x*spec.channels)] = buf[x]; -// } -// } -// } -// -// if (spec.process (to_read)) { -// goto out; -// } -// -// spec.pos += to_read; -// spec.progress = (double) spec.pos /_length; -// -// } -// -// status = 0; -// -// out: -// spec.running = false; -// spec.status = status; -// spec.clear(); -// -// return status; - return 0; + return audio_source(channel)->read (buf, pos, cnt); } void @@ -1142,9 +1395,8 @@ AudioRegion::set_scale_amplitude (gain_t g) send_change (PropertyChange (Properties::scale_amplitude)); } -/** @return the maximum (linear) amplitude of the region */ double -AudioRegion::maximum_amplitude () const +AudioRegion::maximum_amplitude 
(Progress* p) const { framepos_t fpos = _start; framepos_t const fend = _start + _length; @@ -1152,7 +1404,7 @@ AudioRegion::maximum_amplitude () const framecnt_t const blocksize = 64 * 1024; Sample buf[blocksize]; - + while (fpos < fend) { uint32_t n; @@ -1163,7 +1415,7 @@ AudioRegion::maximum_amplitude () const /* read it in */ - if (read_raw_internal (buf, fpos, to_read, 0) != to_read) { + if (read_raw_internal (buf, fpos, to_read, n) != to_read) { return 0; } @@ -1171,11 +1423,56 @@ AudioRegion::maximum_amplitude () const } fpos += to_read; + if (p) { + p->set_progress (float (fpos - _start) / _length); + if (p->cancelled ()) { + return -1; + } + } } return maxamp; } +double +AudioRegion::rms (Progress* p) const +{ + framepos_t fpos = _start; + framepos_t const fend = _start + _length; + uint32_t const n_chan = n_channels (); + double rms = 0; + + framecnt_t const blocksize = 64 * 1024; + Sample buf[blocksize]; + + framecnt_t total = 0; + + if (n_chan == 0 || fend == fpos) { + return 0; + } + + while (fpos < fend) { + framecnt_t const to_read = min (fend - fpos, blocksize); + for (uint32_t c = 0; c < n_chan; ++c) { + if (read_raw_internal (buf, fpos, to_read, c) != to_read) { + return 0; + } + for (framepos_t i = 0; i < to_read; ++i) { + rms += buf[i] * buf[i]; + } + } + total += to_read; + fpos += to_read; + if (p) { + p->set_progress (float (fpos - _start) / _length); + if (p->cancelled ()) { + return -1; + } + } + } + return sqrt (2. * rms / (double)(total * n_chan)); +} + /** Normalize using a given maximum amplitude and target, so that region * _scale_amplitude becomes target / max_amplitude. */ @@ -1184,14 +1481,14 @@ AudioRegion::normalize (float max_amplitude, float target_dB) { gain_t target = dB_to_coefficient (target_dB); - if (target == 1.0f) { + if (target == GAIN_COEFF_UNITY) { /* do not normalize to precisely 1.0 (0 dBFS), to avoid making it appear that we may have clipped. 
*/ target -= FLT_EPSILON; } - if (max_amplitude == 0.0f) { + if (max_amplitude < GAIN_COEFF_SMALL) { /* don't even try */ return; } @@ -1287,7 +1584,7 @@ AudioRegion::source_offset_changed () if (afs && afs->destructive()) { // set_start (source()->natural_position(), this); - set_position (source()->natural_position(), this); + set_position (source()->natural_position()); } } @@ -1298,93 +1595,164 @@ AudioRegion::audio_source (uint32_t n) const return boost::dynamic_pointer_cast(source(n)); } -int -AudioRegion::adjust_transients (frameoffset_t delta) +uint32_t +AudioRegion::get_related_audio_file_channel_count () const { - for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) { - (*x) = (*x) + delta; - } - + uint32_t chan_count = 0; + for (SourceList::const_iterator i = _sources.begin(); i != _sources.end(); ++i) { + + boost::shared_ptr sndf = boost::dynamic_pointer_cast(*i); + if (sndf ) { + + if (sndf->channel_count() > chan_count) { + chan_count = sndf->channel_count(); + } + } +#ifdef HAVE_COREAUDIO + else { + boost::shared_ptr cauf = boost::dynamic_pointer_cast(*i); + if (cauf) { + if (cauf->channel_count() > chan_count) { + chan_count = cauf->channel_count(); + } + } + } +#endif // HAVE_COREAUDIO + } + + return chan_count; +} + +void +AudioRegion::clear_transients () // yet unused +{ + _user_transients.clear (); + _valid_transients = false; send_change (PropertyChange (Properties::valid_transients)); - - return 0; -} +} -int -AudioRegion::update_transient (framepos_t old_position, framepos_t new_position) +void +AudioRegion::add_transient (framepos_t where) { - for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) { - if ((*x) == old_position) { - (*x) = new_position; - send_change (PropertyChange (Properties::valid_transients)); - - break; + if (where < first_frame () || where >= last_frame ()) { + return; + } + where -= _position; + + if (!_valid_transients) { + _transient_user_start = _start; + _valid_transients = true; + } + frameoffset_t offset = _transient_user_start - _start; + + if (where < offset) { + if (offset <= 0) { + return; + } + // region start changed (extend to front), shift points and offset + for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) { + (*x) += offset; } + _transient_user_start -= offset; + offset = 0; } - - return 0; + + const framepos_t p = where - offset; + _user_transients.push_back(p); + send_change (PropertyChange (Properties::valid_transients)); } void -AudioRegion::add_transient (framepos_t where) +AudioRegion::update_transient (framepos_t old_position, framepos_t new_position) { - _transients.push_back(where); - _valid_transients = true; - - send_change (PropertyChange (Properties::valid_transients)); + bool changed = false; + if (!_onsets.empty ()) { + const framepos_t p = old_position - _position; + AnalysisFeatureList::iterator x = std::find (_onsets.begin (), _onsets.end (), p); + if (x != _transients.end ()) { + (*x) = new_position - _position; + changed = true; + } + } + + if (_valid_transients) { + const frameoffset_t offset = _position + _transient_user_start - _start; + const framepos_t p = old_position - offset; + AnalysisFeatureList::iterator x = std::find (_user_transients.begin (), _user_transients.end (), p); + if (x != _transients.end ()) { + (*x) = new_position - offset; + changed = true; + } + } + + if (changed) { + send_change (PropertyChange (Properties::valid_transients)); + } } void AudioRegion::remove_transient 
(framepos_t where) { - _transients.remove(where); - _valid_transients = true; - - send_change (PropertyChange (Properties::valid_transients)); + bool changed = false; + if (!_onsets.empty ()) { + const framepos_t p = where - _position; + AnalysisFeatureList::iterator i = std::find (_onsets.begin (), _onsets.end (), p); + if (i != _transients.end ()) { + _onsets.erase (i); + changed = true; + } + } + + if (_valid_transients) { + const framepos_t p = where - (_position + _transient_user_start - _start); + AnalysisFeatureList::iterator i = std::find (_user_transients.begin (), _user_transients.end (), p); + if (i != _transients.end ()) { + _transients.erase (i); + changed = true; + } + } + + if (changed) { + send_change (PropertyChange (Properties::valid_transients)); + } } -int -AudioRegion::set_transients (AnalysisFeatureList& results) +void +AudioRegion::set_onsets (AnalysisFeatureList& results) { - _transients.clear(); - _transients = results; - _valid_transients = true; - + _onsets.clear(); + _onsets = results; send_change (PropertyChange (Properties::valid_transients)); - - return 0; } -int -AudioRegion::get_transients (AnalysisFeatureList& results, bool force_new) +void +AudioRegion::build_transients () { + _transients.clear (); + _transient_analysis_start = _transient_analysis_end = 0; + boost::shared_ptr pl = playlist(); if (!pl) { - return -1; - } - - if (_valid_transients && !force_new) { - results = _transients; - return 0; + return; } + /* check analyzed sources first */ SourceList::iterator s; - for (s = _sources.begin() ; s != _sources.end(); ++s) { if (!(*s)->has_been_analysed()) { +#ifndef NDEBUG cerr << "For " << name() << " source " << (*s)->name() << " has not been analyzed\n"; +#endif break; } } if (s == _sources.end()) { /* all sources are analyzed, merge data from each one */ - for (s = _sources.begin() ; s != _sources.end(); ++s) { /* find the set of transients within the bounds of this region */ - AnalysisFeatureList::iterator low = lower_bound ((*s)->transients.begin(), (*s)->transients.end(), _start); @@ -1394,181 +1762,268 @@ AudioRegion::get_transients (AnalysisFeatureList& results, bool force_new) _start + _length); /* and add them */ - - results.insert (results.end(), low, high); + _transients.insert (_transients.end(), low, high); } - TransientDetector::cleanup_transients (results, pl->session().frame_rate(), 3.0); + TransientDetector::cleanup_transients (_transients, pl->session().frame_rate(), 3.0); /* translate all transients to current position */ - - for (AnalysisFeatureList::iterator x = results.begin(); x != results.end(); ++x) { + for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) { (*x) -= _start; - (*x) += _position; } - _transients = results; - _valid_transients = true; - - return 0; + _transient_analysis_start = _start; + _transient_analysis_end = _start + _length; + return; } /* no existing/complete transient info */ - static bool analyse_dialog_shown = false; /* global per instance of Ardour */ + static bool analyse_dialog_shown = false; /* global per instance of Ardour */ if (!Config->get_auto_analyse_audio()) { - if (!analyse_dialog_shown) { - pl->session().Dialog (_("\ + if (!analyse_dialog_shown) { + pl->session().Dialog (string_compose (_("\ You have requested an operation that requires audio analysis.\n\n\ You currently have \"auto-analyse-audio\" disabled, which means \ that transient data must be generated every time it is required.\n\n\ If you are doing work that will require transient data on a \ 
regular basis, you should probably enable \"auto-analyse-audio\" \ -then quit ardour and restart.\n\n\ +in Preferences > Audio > Regions, then quit %1 and restart.\n\n\ This dialog will not display again. But you may notice a slight delay \ in this and future transient-detection operations.\n\ -")); - analyse_dialog_shown = true; - } +"), PROGRAM_NAME)); + analyse_dialog_shown = true; + } } - TransientDetector t (pl->session().frame_rate()); - bool existing_results = !results.empty(); + try { + TransientDetector t (pl->session().frame_rate()); + for (uint32_t i = 0; i < n_channels(); ++i) { - _transients.clear (); - _valid_transients = false; - - for (uint32_t i = 0; i < n_channels(); ++i) { + AnalysisFeatureList these_results; - AnalysisFeatureList these_results; - - t.reset (); - - if (t.run ("", this, i, these_results)) { - return -1; - } + t.reset (); - /* translate all transients to give absolute position */ + /* this produces analysis result relative to current position + * ::read() sample 0 is at _position */ + if (t.run ("", this, i, these_results)) { + return; + } - for (AnalysisFeatureList::iterator i = these_results.begin(); i != these_results.end(); ++i) { - (*i) += _position; + /* merge */ + _transients.insert (_transients.end(), these_results.begin(), these_results.end()); } - - /* merge */ - - _transients.insert (_transients.end(), these_results.begin(), these_results.end()); + } catch (...) { + error << string_compose(_("Transient Analysis failed for %1."), _("Audio Region")) << endmsg; + return; } - if (!results.empty()) { - if (existing_results) { - - /* merge our transients into the existing ones, then clean up - those. - */ - - results.insert (results.end(), _transients.begin(), _transients.end()); - TransientDetector::cleanup_transients (results, pl->session().frame_rate(), 3.0); - } - - /* make sure ours are clean too */ + TransientDetector::cleanup_transients (_transients, pl->session().frame_rate(), 3.0); + _transient_analysis_start = _start; + _transient_analysis_end = _start + _length; +} - TransientDetector::cleanup_transients (_transients, pl->session().frame_rate(), 3.0); +/* Transient analysis uses ::read() which is relative to _start, + * at the time of analysis and spans _length samples. + * + * This is true for RhythmFerret::run_analysis and the + * TransientDetector here. + * + * We store _start and length in _transient_analysis_start, + * _transient_analysis_end in case the region is trimmed or split after analysis. + * + * Various methods (most notably Playlist::find_next_transient and + * RhythmFerret::do_split_action) span multiple regions and *merge/combine* + * Analysis results. + * We therefore need to translate the analysis timestamps to absolute session-time + * and include the _position of the region. + * + * Note: we should special case the AudioRegionView. 
The region-view itself + * is located at _position (currently ARV subtracts _position again) + */ +void +AudioRegion::get_transients (AnalysisFeatureList& results) +{ + boost::shared_ptr pl = playlist(); + if (!playlist ()) { + return; + } - } else { + Region::merge_features (results, _user_transients, _position + _transient_user_start - _start); - TransientDetector::cleanup_transients (_transients, pl->session().frame_rate(), 3.0); - results = _transients; + if (!_onsets.empty ()) { + // onsets are invalidated when start or length changes + merge_features (results, _onsets, _position); + return; } - _valid_transients = true; + if ((_transient_analysis_start == _transient_analysis_end) + || _transient_analysis_start > _start + || _transient_analysis_end < _start + _length) { + build_transients (); + } - return 0; + merge_features (results, _transients, _position + _transient_analysis_start - _start); } /** Find areas of `silence' within a region. * * @param threshold Threshold below which signal is considered silence (as a sample value) * @param min_length Minimum length of silent period to be reported. - * @return Silent periods; first of pair is the offset within the region, second is the length of the period + * @return Silent intervals, measured relative to the region start in the source */ -std::list > -AudioRegion::find_silence (Sample threshold, framecnt_t min_length, InterThreadInfo& itt) const +AudioIntervalResult +AudioRegion::find_silence (Sample threshold, framecnt_t min_length, framecnt_t fade_length, InterThreadInfo& itt) const { framecnt_t const block_size = 64 * 1024; - Sample loudest[block_size]; - Sample buf[block_size]; + boost::scoped_array loudest (new Sample[block_size]); + boost::scoped_array buf (new Sample[block_size]); + + assert (fade_length >= 0); + assert (min_length > 0); framepos_t pos = _start; - framepos_t const end = _start + _length - 1; + framepos_t const end = _start + _length; - std::list > silent_periods; + AudioIntervalResult silent_periods; - bool in_silence = false; - frameoffset_t silence_start = 0; - bool silence; + bool in_silence = true; + frameoffset_t silence_start = _start; while (pos < end && !itt.cancel) { + framecnt_t cur_samples = 0; + framecnt_t const to_read = min (end - pos, block_size); /* fill `loudest' with the loudest absolute sample at each instant, across all channels */ - memset (loudest, 0, sizeof (Sample) * block_size); + memset (loudest.get(), 0, sizeof (Sample) * block_size); + for (uint32_t n = 0; n < n_channels(); ++n) { - read_raw_internal (buf, pos, block_size, n); - for (framecnt_t i = 0; i < block_size; ++i) { + cur_samples = read_raw_internal (buf.get(), pos, to_read, n); + for (framecnt_t i = 0; i < cur_samples; ++i) { loudest[i] = max (loudest[i], abs (buf[i])); } } /* now look for silence */ - for (framecnt_t i = 0; i < block_size; ++i) { - silence = abs (loudest[i]) < threshold; + for (framecnt_t i = 0; i < cur_samples; ++i) { + bool const silence = abs (loudest[i]) < threshold; if (silence && !in_silence) { /* non-silence to silence */ in_silence = true; - silence_start = pos + i; + silence_start = pos + i + fade_length; } else if (!silence && in_silence) { /* silence to non-silence */ in_silence = false; - if (pos + i - 1 - silence_start >= min_length) { - silent_periods.push_back (std::make_pair (silence_start, pos + i - 1)); + frameoffset_t silence_end = pos + i - 1 - fade_length; + + if (silence_end - silence_start >= min_length) { + silent_periods.push_back (std::make_pair (silence_start, silence_end)); } } 
} - pos += block_size; - itt.progress = (end-pos)/(double)_length; + pos += cur_samples; + itt.progress = (end - pos) / (double)_length; + + if (cur_samples == 0) { + assert (pos >= end); + break; + } } - if (in_silence && end - 1 - silence_start >= min_length) { + if (in_silence && !itt.cancel) { /* last block was silent, so finish off the last period */ - silent_periods.push_back (std::make_pair (silence_start, end)); + if (end - 1 - silence_start >= min_length + fade_length) { + silent_periods.push_back (std::make_pair (silence_start, end - 1)); + } } - itt.done = true; + itt.done = true; return silent_periods; } - - -extern "C" { - - int region_read_peaks_from_c (void *arg, uint32_t npeaks, uint32_t start, uint32_t cnt, intptr_t data, uint32_t n_chan, double samples_per_unit) +Evoral::Range +AudioRegion::body_range () const { - return ((AudioRegion *) arg)->read_peaks ((PeakData *) data, (framecnt_t) npeaks, (framepos_t) start, (framecnt_t) cnt, n_chan,samples_per_unit); + return Evoral::Range (first_frame() + _fade_in->back()->when + 1, last_frame() - _fade_out->back()->when); } -uint32_t region_length_from_c (void *arg) +boost::shared_ptr +AudioRegion::get_single_other_xfade_region (bool start) const { + boost::shared_ptr pl (playlist()); + + if (!pl) { + /* not currently in a playlist - xfade length is unbounded + (and irrelevant) + */ + return boost::shared_ptr (); + } + + boost::shared_ptr rl; + + if (start) { + rl = pl->regions_at (position()); + } else { + rl = pl->regions_at (last_frame()); + } + + RegionList::iterator i; + boost::shared_ptr other; + uint32_t n = 0; + + /* count and find the other region in a single pass through the list */ + + for (i = rl->begin(); i != rl->end(); ++i) { + if ((*i).get() != this) { + other = *i; + } + ++n; + } + + if (n != 2) { + /* zero or multiple regions stacked here - don't care about xfades */ + return boost::shared_ptr (); + } - return ((AudioRegion *) arg)->length(); + return other; } -uint32_t sourcefile_length_from_c (void *arg, double zoom_factor) +framecnt_t +AudioRegion::verify_xfade_bounds (framecnt_t len, bool start) { - return ( (AudioRegion *) arg)->audio_source()->available_peaks (zoom_factor) ; + /* this is called from a UI to check on whether a new proposed + length for an xfade is legal or not. it returns the legal + length corresponding to @a len which may be shorter than or + equal to @a len itself. + */ + + boost::shared_ptr other = get_single_other_xfade_region (start); + framecnt_t maxlen; + + if (!other) { + /* zero or > 2 regions here, don't care about len, but + it can't be longer than the region itself. + */ + return min (length(), len); + } + + /* we overlap a single region. clamp the length of an xfade to + the maximum possible duration of the overlap (if the other + region were trimmed appropriately). + */ + + if (start) { + maxlen = other->latest_possible_frame() - position(); + } else { + maxlen = last_frame() - other->earliest_possible_position(); + } + + return min (length(), min (maxlen, len)); + } -} /* extern "C" */
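
A few illustrative sketches of the routines added above follow; none of this is part of the patch itself. The new AudioRegion::rms() sums squared samples across every channel and returns sqrt (2. * rms / (total * n_chan)). The factor of two references the result to a full-scale sine wave, so a 0 dBFS sine reads as 1.0 rather than 0.707. Below is a minimal standalone sketch of the same arithmetic; the types and the test signal are stand-ins, not Ardour's Sample/framecnt_t typedefs or its read path.

// Sine-referenced RMS, mirroring the accumulation in AudioRegion::rms():
// sum x^2 over all samples of all channels, then sqrt (2 * sum / N).
// "Sample" here is a plain typedef, not the Ardour one.
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

typedef float Sample;

static double
sine_referenced_rms (const std::vector<Sample>& interleaved, unsigned n_chan)
{
	if (interleaved.empty () || n_chan == 0) {
		return 0;
	}
	double sum = 0;
	for (size_t i = 0; i < interleaved.size (); ++i) {
		sum += (double) interleaved[i] * (double) interleaved[i];
	}
	/* interleaved.size() == total_frames * n_chan, matching the
	   (total * n_chan) divisor used in the patch */
	return std::sqrt (2. * sum / (double) interleaved.size ());
}

int
main ()
{
	/* one second of a full-scale 1 kHz sine at 48 kHz, mono:
	   expect an RMS of ~1.0, i.e. 0 dBFS on a sine-referenced scale */
	const float two_pi = 6.2831853f;
	std::vector<Sample> buf (48000);
	for (size_t i = 0; i < buf.size (); ++i) {
		buf[i] = std::sin (two_pi * 1000.f * (float) i / 48000.f);
	}
	printf ("rms = %.3f\n", sine_referenced_rms (buf, 1));
	return 0;
}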
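
add_transient(), remove_transient() and get_transients() above keep user-placed markers relative to _transient_user_start (the value of _start captured when the first marker is added) instead of as absolute timeline frames. The sketch below shows only that offset bookkeeping with a stand-in struct and made-up numbers; it also assumes a front trim advances _position and _start by the same amount, which is how trims keep source material aligned on the timeline. It is not the real ARDOUR::AudioRegion code.

// Round-trip of the user-transient offset bookkeeping used above:
//   stored   = (where - _position) - (_transient_user_start - _start)
//   restored = stored + _position + _transient_user_start - _start
#include <cassert>
#include <cstdio>

typedef long framepos_t;

struct RegionSketch {
	framepos_t position;              // timeline frame of the region start
	framepos_t start;                 // offset into the source
	framepos_t transient_user_start;  // value of `start' when the first marker was added
	framepos_t stored;                // one stored user transient

	void add_transient (framepos_t where_abs) {
		framepos_t where  = where_abs - position;
		framepos_t offset = transient_user_start - start;
		stored = where - offset;
	}
	framepos_t absolute () const {
		return stored + position + transient_user_start - start;
	}
};

int
main ()
{
	RegionSketch r;
	r.position = 100000;
	r.start = 2000;
	r.transient_user_start = 2000;   // captured when the first marker is added

	r.add_transient (101234);        // user clicks at timeline frame 101234
	assert (r.absolute () == 101234);

	/* front-trim the region by 500 frames: _start and _position advance
	   together, _transient_user_start and the stored value do not */
	r.start    += 500;
	r.position += 500;
	assert (r.absolute () == 101234); // marker stays at the same timeline frame

	printf ("marker still at %ld\n", r.absolute ());
	return 0;
}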
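
find_silence() above now tracks, per block, the loudest absolute sample across all channels, and pads every detected silent run by fade_length at both ends before applying the min_length test. The sketch below reproduces that state machine for a single pre-loaded mono buffer; block-wise reading, Progress/cancel handling and the Ardour typedefs are omitted, and interval positions are relative to the start of the buffer.

// Single-channel sketch of the silence state machine in find_silence():
// a sample is silent when |x| < threshold; runs are shrunk by fade_length
// on both sides and reported only if still at least min_length long.
#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

typedef std::pair<long, long> Interval;   // [first, last] sample of a silent stretch

static std::vector<Interval>
find_silence_1ch (const std::vector<float>& buf, float threshold,
                  long min_length, long fade_length)
{
	std::vector<Interval> out;
	bool in_silence = true;               // like the patch, assume silence at the start
	long silence_start = 0;

	for (long i = 0; i < (long) buf.size (); ++i) {
		const bool silent = std::fabs (buf[i]) < threshold;

		if (silent && !in_silence) {
			/* non-silence to silence: leave room for a fade-out */
			in_silence = true;
			silence_start = i + fade_length;
		} else if (!silent && in_silence) {
			/* silence to non-silence: leave room for a fade-in */
			in_silence = false;
			const long silence_end = i - 1 - fade_length;
			if (silence_end - silence_start >= min_length) {
				out.push_back (Interval (silence_start, silence_end));
			}
		}
	}

	if (in_silence && !buf.empty ()) {
		/* buffer ended while silent: close the last interval */
		const long end = (long) buf.size () - 1;
		if (end - silence_start >= min_length + fade_length) {
			out.push_back (Interval (silence_start, end));
		}
	}
	return out;
}

int
main ()
{
	/* 1000 loud samples, 5000 near-silent ones, 1000 loud samples */
	std::vector<float> buf (7000, 0.5f);
	for (long i = 1000; i < 6000; ++i) {
		buf[i] = 0.0001f;
	}
	const std::vector<Interval> s = find_silence_1ch (buf, 0.001f, 1000, 64);
	for (size_t i = 0; i < s.size (); ++i) {
		printf ("silence: %ld .. %ld\n", s[i].first, s[i].second);
	}
	return 0;
}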
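
verify_xfade_bounds() above clamps a proposed crossfade length to the region's own length and, when exactly one other region overlaps the fade point, to how far that region could still be extended. A compact sketch of just the clamping rule, with invented example numbers; the latest_possible_frame / earliest_possible_position figures are placeholders, not calls into Ardour.

// Clamping rule from verify_xfade_bounds(): the legal fade length is
// min (own length, min (available overlap, requested length)).
#include <algorithm>
#include <cstdio>

typedef long framepos_t;
typedef long framecnt_t;

static framecnt_t
clamp_xfade (framecnt_t requested, framecnt_t own_length, framecnt_t max_overlap)
{
	return std::min (own_length, std::min (max_overlap, requested));
}

int
main ()
{
	const framecnt_t own_length = 48000;

	/* fade-in case: the other region's source runs out 4000 frames past
	   our position, so a 10000-frame request is clamped to 4000 */
	const framepos_t our_position = 100000;
	const framepos_t other_latest_possible_frame = 104000;       // made-up value
	framecnt_t maxlen = other_latest_possible_frame - our_position;
	printf ("fade-in:  %ld\n", clamp_xfade (10000, own_length, maxlen));

	/* fade-out case: the other region could start at most 6000 frames
	   before our last frame, so a 10000-frame request becomes 6000 */
	const framepos_t our_last_frame = 148000;
	const framepos_t other_earliest_possible_position = 142000;  // made-up value
	maxlen = our_last_frame - other_earliest_possible_position;
	printf ("fade-out: %ld\n", clamp_xfade (10000, own_length, maxlen));

	return 0;
}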