#include <set>
+#include <boost/scoped_array.hpp>
#include <glibmm/thread.h>
#include "evoral/Curve.hpp"
#include "ardour/audioregion.h"
-#include "ardour/debug.h"
#include "ardour/session.h"
-#include "ardour/gain.h"
#include "ardour/dB.h"
#include "ardour/playlist.h"
#include "ardour/audiofilesource.h"
PBD::PropertyDescriptor<bool> fade_in_active;
PBD::PropertyDescriptor<bool> fade_out_active;
PBD::PropertyDescriptor<float> scale_amplitude;
+ PBD::PropertyDescriptor<bool> fade_out_is_xfade;
+ PBD::PropertyDescriptor<bool> fade_out_is_short;
+ PBD::PropertyDescriptor<bool> fade_in_is_xfade;
+ PBD::PropertyDescriptor<bool> fade_in_is_short;
+ }
+}
+
+static const double VERY_SMALL_SIGNAL = 0.0000001; //-140dB
+
+/* Curve manipulations */
+
+/** Copy the points of @a src into @a dst reflected about the temporal
+ *  midpoint, so that (e.g.) a fade-in shape becomes the matching
+ *  fade-out shape.
+ *
+ *  @param dst list that receives the reversed points (assumed empty).
+ *  @param src list to read from; its final point defines the curve length.
+ *
+ *  src is walked backwards so that points are handed to dst in ascending
+ *  time order, which is what ControlList insertion expects; iterating
+ *  forwards would insert (len - when) values in descending order.
+ */
+static void
+reverse_curve (boost::shared_ptr<Evoral::ControlList> dst, boost::shared_ptr<const Evoral::ControlList> src)
+{
+	/* 'when' is a double; keep it as one rather than truncating to size_t */
+	double const len = src->back()->when;
+
+	for (Evoral::ControlList::const_reverse_iterator it = src->rbegin(); it != src->rend(); ++it) {
+		dst->fast_simple_add (len - (*it)->when, (*it)->value);
+	}
+}
+
+/** Fill @a dst with the "inverse" of @a src under an equal-power
+ *  (sum-of-squares) rule: for every source point with value v, add a
+ *  point at the same time whose value is sqrt (1 - v^2).
+ *
+ *  @param dst list that receives the inverse points.
+ *  @param src list to read points from.
+ */
+static void
+generate_inverse_power_curve (boost::shared_ptr<Evoral::ControlList> dst, boost::shared_ptr<const Evoral::ControlList> src)
+{
+	for (Evoral::ControlList::const_iterator point = src->begin(); point != src->end(); ++point) {
+		float const v = (*point)->value;
+		/* equal power: v^2 + inverse^2 == 1 */
+		dst->fast_simple_add ((*point)->when, sqrtf (1 - powf (v, 2)));
+	}
+}
+
+/** Generate a fade-out curve of @a num_steps points across @a len by
+ *  applying an equal per-step gain drop in dB.
+ *
+ *  @param dst list to fill (cleared first); starts at 1.0 and ends at
+ *             VERY_SMALL_SIGNAL.
+ *  @param len curve length, in the list's time units.
+ *  @param num_steps number of segments to generate.
+ *  @param dB_drop total gain drop in dB over the whole fade.
+ */
+static void
+generate_db_fade (boost::shared_ptr<Evoral::ControlList> dst, double len, int num_steps, float dB_drop)
+{
+	dst->clear ();
+	dst->fast_simple_add (0, 1);
+
+	//generate a fade-out curve by successively applying a gain drop
+	float const fade_speed = dB_to_coefficient(dB_drop / (float) num_steps);
+	float coeff = 1.0;
+	for (int i = 1; i < (num_steps-1); i++) {
+		/* accumulate the per-step drop instead of recomputing
+		   fade_speed^i from scratch every iteration (the previous
+		   inner loop made this O(n^2)); the sequence of float
+		   multiplications, and hence the rounding, is identical.
+		*/
+		coeff *= fade_speed;
+		dst->fast_simple_add (len*(double)i/(double)num_steps, coeff);
+	}
+
+	dst->fast_simple_add (len, VERY_SMALL_SIGNAL);
+}
+
+/** Blend @a curve1 into @a curve2, writing the result to @a dst.
+ *
+ *  Each pair of points is converted to dB and linearly cross-faded from
+ *  curve1's value towards curve2's as the point index advances, then
+ *  converted back to a coefficient.  The two source curves must have the
+ *  same number of points; if they do not, dst is left untouched.
+ *
+ *  NOTE(review): the weight is count/size with count running 0..size-1,
+ *  so the final point still carries a 1/size contribution from curve1
+ *  rather than being pure curve2 — confirm this asymmetry is intended.
+ */
+static void
+merge_curves (boost::shared_ptr<Evoral::ControlList> dst,
+              boost::shared_ptr<const Evoral::ControlList> curve1,
+              boost::shared_ptr<const Evoral::ControlList> curve2)
+{
+	Evoral::ControlList::EventList::size_type size = curve1->size();
+
+	//curve lengths must match for now
+	if (size != curve2->size()) {
+		return;
+	}
+
+	Evoral::ControlList::const_iterator c1 = curve1->begin();
+	int count = 0;
+	for (Evoral::ControlList::const_iterator c2 = curve2->begin(); c2!=curve2->end(); c2++ ) {
+		float v1 = accurate_coefficient_to_dB((*c1)->value);
+		float v2 = accurate_coefficient_to_dB((*c2)->value);
+
+		/* interpolate in the dB domain; weight shifts from v1 to v2 */
+		double interp = v1 * ( 1.0-( (double)count / (double)size) );
+		interp += v2 * ( (double)count / (double)size );
+
+		interp = dB_to_coefficient(interp);
+		dst->add ( (*c1)->when, interp );
+		c1++;
+		count++;
 	}
 }
AudioRegion::make_property_quarks ()
{
Properties::envelope_active.property_id = g_quark_from_static_string (X_("envelope-active"));
- DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for envelope-active = %1\n", Properties::envelope_active.property_id));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for envelope-active = %1\n", Properties::envelope_active.property_id));
Properties::default_fade_in.property_id = g_quark_from_static_string (X_("default-fade-in"));
- DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-in = %1\n", Properties::default_fade_in.property_id));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-in = %1\n", Properties::default_fade_in.property_id));
Properties::default_fade_out.property_id = g_quark_from_static_string (X_("default-fade-out"));
- DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-out = %1\n", Properties::default_fade_out.property_id));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-out = %1\n", Properties::default_fade_out.property_id));
Properties::fade_in_active.property_id = g_quark_from_static_string (X_("fade-in-active"));
- DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-in-active = %1\n", Properties::fade_in_active.property_id));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-in-active = %1\n", Properties::fade_in_active.property_id));
Properties::fade_out_active.property_id = g_quark_from_static_string (X_("fade-out-active"));
- DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-out-active = %1\n", Properties::fade_out_active.property_id));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-out-active = %1\n", Properties::fade_out_active.property_id));
Properties::scale_amplitude.property_id = g_quark_from_static_string (X_("scale-amplitude"));
- DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for scale-amplitude = %1\n", Properties::scale_amplitude.property_id));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for scale-amplitude = %1\n", Properties::scale_amplitude.property_id));
+ Properties::fade_out_is_xfade.property_id = g_quark_from_static_string (X_("fade-out-is-xfade"));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-out-is-xfade = %1\n", Properties::fade_out_is_xfade.property_id));
+ Properties::fade_out_is_short.property_id = g_quark_from_static_string (X_("fade-out-is-short"));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-out-is-short = %1\n", Properties::fade_out_is_short.property_id));
+ Properties::fade_in_is_xfade.property_id = g_quark_from_static_string (X_("fade-in-is-xfade"));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-in-is-xfade = %1\n", Properties::fade_in_is_xfade.property_id));
+ Properties::fade_in_is_short.property_id = g_quark_from_static_string (X_("fade-in-is-short"));
+ DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-in-is-short = %1\n", Properties::fade_in_is_short.property_id));
}
void
add_property (_fade_in_active);
add_property (_fade_out_active);
add_property (_scale_amplitude);
+ add_property (_fade_out_is_xfade);
+ add_property (_fade_out_is_short);
+ add_property (_fade_in_is_xfade);
+ add_property (_fade_in_is_short);
}
#define AUDIOREGION_STATE_DEFAULT \
, _default_fade_out (Properties::default_fade_out, true) \
, _fade_in_active (Properties::fade_in_active, true) \
, _fade_out_active (Properties::fade_out_active, true) \
- , _scale_amplitude (Properties::scale_amplitude, 1.0)
-
+ , _scale_amplitude (Properties::scale_amplitude, 1.0) \
+ , _fade_in_is_xfade (Properties::fade_in_is_xfade, false) \
+ , _fade_out_is_xfade (Properties::fade_out_is_xfade, false) \
+ , _fade_in_is_short (Properties::fade_in_is_short, false) \
+ , _fade_out_is_short (Properties::fade_out_is_short, false)
+
#define AUDIOREGION_COPY_STATE(other) \
_envelope_active (Properties::envelope_active, other->_envelope_active) \
, _default_fade_in (Properties::default_fade_in, other->_default_fade_in) \
, _default_fade_out (Properties::default_fade_out, other->_default_fade_out) \
, _fade_in_active (Properties::fade_in_active, other->_fade_in_active) \
, _fade_out_active (Properties::fade_out_active, other->_fade_out_active) \
- , _scale_amplitude (Properties::scale_amplitude, other->_scale_amplitude)
+ , _scale_amplitude (Properties::scale_amplitude, other->_scale_amplitude) \
+ , _fade_in_is_xfade (Properties::fade_in_is_xfade, other->_fade_in_is_xfade) \
+ , _fade_out_is_xfade (Properties::fade_out_is_xfade, other->_fade_out_is_xfade) \
+ , _fade_in_is_short (Properties::fade_in_is_short, other->_fade_in_is_short) \
+ , _fade_out_is_short (Properties::fade_out_is_short, other->_fade_out_is_short)
/* a Session will reset these to its chosen defaults by calling AudioRegion::set_default_fade() */
void
, AUDIOREGION_STATE_DEFAULT
, _automatable (s)
, _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
+ , _inverse_fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
, _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
+ , _inverse_fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
, _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
, _fade_in_suspended (0)
, _fade_out_suspended (0)
, AUDIOREGION_STATE_DEFAULT
, _automatable(srcs[0]->session())
, _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
+ , _inverse_fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
, _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
+ , _inverse_fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
, _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
, _fade_in_suspended (0)
, _fade_out_suspended (0)
assert (_sources.size() == _master_sources.size());
}
-AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other, framecnt_t offset, bool offset_relative)
-	: Region (other, offset, offset_relative)
+/** Copy constructor: make an exact copy of @a other with no offset into
+ *  its sources.  Fades, their inverse (crossfade) curves and the gain
+ *  envelope are all deep-copied from @a other.
+ */
+AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other)
+	: Region (other)
 	, AUDIOREGION_COPY_STATE (other)
 	, _automatable (other->session())
 	, _fade_in (new AutomationList (*other->_fade_in))
+	, _inverse_fade_in (new AutomationList(*other->_inverse_fade_in))
 	, _fade_out (new AutomationList (*other->_fade_out))
+	, _inverse_fade_out (new AutomationList (*other->_inverse_fade_out))
+	/* As far as I can see, the _envelope's times are relative to region position, and have nothing
+	   to do with sources (and hence _start).  So when we copy the envelope, we just use the supplied offset.
+	*/
+	, _envelope (new AutomationList (*other->_envelope, 0, other->_length))
+	, _fade_in_suspended (0)
+	, _fade_out_suspended (0)
+{
+	/* don't use init here, because we got fade in/out from the other region
+	*/
+	register_properties ();
+	listen_to_my_curves ();
+	connect_to_analysis_changed ();
+	connect_to_header_position_offset_changed ();
+
+	assert(_type == DataType::AUDIO);
+	assert (_sources.size() == _master_sources.size());
+}
+
+AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other, framecnt_t offset)
+ : Region (other, offset)
+ , AUDIOREGION_COPY_STATE (other)
+ , _automatable (other->session())
+ , _fade_in (new AutomationList (*other->_fade_in))
+ , _inverse_fade_in (new AutomationList(*other->_inverse_fade_in))
+ , _fade_out (new AutomationList (*other->_fade_out))
+ , _inverse_fade_out (new AutomationList (*other->_inverse_fade_out))
/* As far as I can see, the _envelope's times are relative to region position, and have nothing
to do with sources (and hence _start). So when we copy the envelope, we just use the supplied offset.
*/
, AUDIOREGION_COPY_STATE (other)
, _automatable (other->session())
, _fade_in (new AutomationList (*other->_fade_in))
+ , _inverse_fade_in (new AutomationList(*other->_inverse_fade_in))
, _fade_out (new AutomationList (*other->_fade_out))
+ , _inverse_fade_out (new AutomationList (*other->_inverse_fade_out))
, _envelope (new AutomationList (*other->_envelope))
, _fade_in_suspended (0)
, _fade_out_suspended (0)
, AUDIOREGION_STATE_DEFAULT
, _automatable(srcs[0]->session())
, _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
+ , _inverse_fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
, _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
+ , _inverse_fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
, _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
, _fade_in_suspended (0)
, _fade_out_suspended (0)
}
void
-AudioRegion::post_set ()
+AudioRegion::post_set (const PropertyChange& /*ignored*/)
{
if (!_sync_marked) {
_sync_position = _start;
if (_left_of_split) {
if (_fade_in->back()->when >= _length) {
set_default_fade_in ();
- }
+ }
set_default_fade_out ();
_left_of_split = false;
}
if (_right_of_split) {
if (_fade_out->back()->when >= _length) {
set_default_fade_out ();
- }
+ }
set_default_fade_in ();
_right_of_split = false;
for (SourceList::const_iterator i = _sources.begin(); i != _sources.end(); ++i) {
- /* connect only once to HeaderPositionOffsetChanged, even if sources are replicated
- */
+ /* connect only once to HeaderPositionOffsetChanged, even if sources are replicated
+ */
if (unique_srcs.find (*i) == unique_srcs.end ()) {
unique_srcs.insert (*i);
}
}
-ARDOUR::nframes_t
-AudioRegion::read_peaks (PeakData *buf, nframes_t npeaks, nframes_t offset, nframes_t cnt, uint32_t chan_n, double samples_per_unit) const
+ARDOUR::framecnt_t
+AudioRegion::read_peaks (PeakData *buf, framecnt_t npeaks, framecnt_t offset, framecnt_t cnt, uint32_t chan_n, double samples_per_unit) const
{
if (chan_n >= _sources.size()) {
return 0;
return 0;
} else {
if (_scale_amplitude != 1.0f) {
- for (nframes_t n = 0; n < npeaks; ++n) {
+ for (framecnt_t n = 0; n < npeaks; ++n) {
buf[n].max *= _scale_amplitude;
buf[n].min *= _scale_amplitude;
}
}
}
+/** @param buf Buffer to write data to (existing data will be overwritten).
+ * @param pos Position to read from as an offset from the region position.
+ * @param cnt Number of frames to read.
+ * @param channel Channel to read from.
+ */
framecnt_t
-AudioRegion::read (Sample* buf, framepos_t timeline_position, framecnt_t cnt, int channel) const
+AudioRegion::read (Sample* buf, framepos_t pos, framecnt_t cnt, int channel) const
{
/* raw read, no fades, no gain, nada */
- return _read_at (_sources, _length, buf, 0, 0, _position + timeline_position, cnt, channel, 0, 0, ReadOps (0));
-}
-
-framecnt_t
-AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
- framepos_t file_position, framecnt_t cnt, uint32_t chan_n,
- framecnt_t read_frames, framecnt_t skip_frames) const
-{
- /* regular diskstream/butler read complete with fades etc */
- return _read_at (_sources, _length, buf, mixdown_buffer, gain_buffer,
- file_position, cnt, chan_n, read_frames, skip_frames, ReadOps (~0));
+ return read_from_sources (_sources, _length, buf, _position + pos, cnt, channel);
}
framecnt_t
-AudioRegion::master_read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
+AudioRegion::master_read_at (Sample *buf, Sample* /*mixdown_buffer*/, float* /*gain_buffer*/,
framepos_t position, framecnt_t cnt, uint32_t chan_n) const
{
/* do not read gain/scaling/fades and do not count this disk i/o in statistics */
- return _read_at (_master_sources, _master_sources.front()->length(_master_sources.front()->timeline_position()),
- buf, mixdown_buffer, gain_buffer, position, cnt, chan_n, 0, 0, ReadOps (0));
+ assert (cnt >= 0);
+ return read_from_sources (
+ _master_sources, _master_sources.front()->length (_master_sources.front()->timeline_position()),
+ buf, position, cnt, chan_n
+ );
}
+/** @param buf Buffer to mix data into.
+ * @param mixdown_buffer Scratch buffer for audio data.
+ * @param gain_buffer Scratch buffer for gain data.
+ * @param position Position within the session to read from.
+ * @param cnt Number of frames to read.
+ * @param chan_n Channel number to read.
+ */
framecnt_t
-AudioRegion::_read_at (const SourceList& /*srcs*/, framecnt_t limit,
- Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
- framepos_t position,
- framecnt_t cnt,
- uint32_t chan_n,
- framecnt_t /*read_frames*/,
- framecnt_t /*skip_frames*/,
- ReadOps rops) const
-{
- frameoffset_t internal_offset;
- frameoffset_t buf_offset;
- framecnt_t to_read;
- bool raw = (rops == ReadOpsNone);
-
- if (n_channels() == 0) {
- return 0;
- }
-
- if (muted() && !raw) {
- return 0; /* read nothing */
- }
-
- /* precondition: caller has verified that we cover the desired section */
+AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
+ framepos_t position,
+ framecnt_t cnt,
+ uint32_t chan_n) const
+{
+ /* We are reading data from this region into buf (possibly via mixdown_buffer).
+ The caller has verified that we cover the desired section.
+ */
- if (position < _position) {
- internal_offset = 0;
- buf_offset = _position - position;
- cnt -= buf_offset;
- } else {
- internal_offset = position - _position;
- buf_offset = 0;
- }
+ /* See doc/region_read.svg for a drawing which might help to explain
+ what is going on.
+ */
- if (internal_offset >= limit) {
- return 0; /* read nothing */
+ assert (cnt >= 0);
+
+ if (n_channels() == 0) {
+ return 0;
}
- if ((to_read = min (cnt, limit - internal_offset)) == 0) {
+ if (muted()) {
return 0; /* read nothing */
}
- if (opaque() || raw) {
- /* overwrite whatever is there */
- mixdown_buffer = buf + buf_offset;
- } else {
- mixdown_buffer += buf_offset;
- }
-
- if (rops & ReadOpsCount) {
- _read_data_count = 0;
- }
-
- if (chan_n < n_channels()) {
-
- boost::shared_ptr<AudioSource> src = audio_source(chan_n);
- if (src->read (mixdown_buffer, _start + internal_offset, to_read) != to_read) {
- return 0; /* "read nothing" */
- }
-
- if (rops & ReadOpsCount) {
- _read_data_count += src->read_data_count();
- }
-
- } else {
-
- /* track is N-channel, this region has less channels; silence the ones
- we don't have.
- */
+
+ /* WORK OUT WHERE TO GET DATA FROM */
- if (Config->get_replicate_missing_region_channels()) {
- /* track is N-channel, this region has less channels, so use a relevant channel
- */
-
- uint32_t channel = n_channels() % chan_n;
- boost::shared_ptr<AudioSource> src = audio_source (channel);
+ framecnt_t to_read;
- if (src->read (mixdown_buffer, _start + internal_offset, to_read) != to_read) {
- return 0; /* "read nothing" */
- }
+ assert (position >= _position);
+ frameoffset_t const internal_offset = position - _position;
- /* adjust read data count appropriately since this was a duplicate read */
- src->dec_read_data_count (to_read);
- } else {
- memset (mixdown_buffer, 0, sizeof (Sample) * cnt);
- }
+ if (internal_offset >= _length) {
+ return 0; /* read nothing */
}
- if (rops & ReadOpsFades) {
-
- /* fade in */
-
- if (_fade_in_active && _session.config.get_use_region_fades()) {
-
- nframes_t fade_in_length = (nframes_t) _fade_in->back()->when;
+ if ((to_read = min (cnt, _length - internal_offset)) == 0) {
+ return 0; /* read nothing */
+ }
- /* see if this read is within the fade in */
- if (internal_offset < fade_in_length) {
+ /* COMPUTE DETAILS OF ANY FADES INVOLVED IN THIS READ */
- nframes_t fi_limit;
+ /* Amount (length) of fade in that we are dealing with in this read */
+ framecnt_t fade_in_limit = 0;
- fi_limit = min (to_read, fade_in_length - internal_offset);
+ /* Offset from buf / mixdown_buffer of the start
+ of any fade out that we are dealing with
+ */
+ frameoffset_t fade_out_offset = 0;
+
+ /* Amount (length) of fade out that we are dealing with in this read */
+ framecnt_t fade_out_limit = 0;
+ framecnt_t fade_interval_start = 0;
- _fade_in->curve().get_vector (internal_offset, internal_offset+fi_limit, gain_buffer, fi_limit);
+ /* Fade in */
+
+ if (_fade_in_active && _session.config.get_use_region_fades()) {
+
+ framecnt_t fade_in_length = (framecnt_t) _fade_in->back()->when;
- for (nframes_t n = 0; n < fi_limit; ++n) {
- mixdown_buffer[n] *= gain_buffer[n];
- }
- }
+ /* see if this read is within the fade in */
+
+ if (internal_offset < fade_in_length) {
+ fade_in_limit = min (to_read, fade_in_length - internal_offset);
}
-
- /* fade out */
-
- if (_fade_out_active && _session.config.get_use_region_fades()) {
-
- /* see if some part of this read is within the fade out */
+ }
+
+ /* Fade out */
+
+ if (_fade_out_active && _session.config.get_use_region_fades()) {
+
+ /* see if some part of this read is within the fade out */
/* ................. >| REGION
- limit
+ _length
{ } FADE
fade_out_length
^
- limit - fade_out_length
+ _length - fade_out_length
|--------------|
^internal_offset
^internal_offset + to_read
we need the intersection of [internal_offset,internal_offset+to_read] with
- [limit - fade_out_length, limit]
+ [_length - fade_out_length, _length]
*/
- nframes_t fade_out_length = (nframes_t) _fade_out->back()->when;
- nframes_t fade_interval_start = max(internal_offset, limit-fade_out_length);
- nframes_t fade_interval_end = min(internal_offset + to_read, limit);
+ fade_interval_start = max (internal_offset, _length - framecnt_t (_fade_out->back()->when));
+ framecnt_t fade_interval_end = min(internal_offset + to_read, _length.val());
+
+ if (fade_interval_end > fade_interval_start) {
+ /* (part of the) the fade out is in this buffer */
+ fade_out_limit = fade_interval_end - fade_interval_start;
+ fade_out_offset = fade_interval_start - internal_offset;
+ }
+ }
- if (fade_interval_end > fade_interval_start) {
- /* (part of the) the fade out is in this buffer */
+ /* READ DATA FROM THE SOURCE INTO mixdown_buffer.
+ We can never read directly into buf, since it may contain data
+ from a region `below' this one in the stack, and our fades (if they exist)
+ may need to mix with the existing data.
+ */
- nframes_t fo_limit = fade_interval_end - fade_interval_start;
- nframes_t curve_offset = fade_interval_start - (limit-fade_out_length);
- nframes_t fade_offset = fade_interval_start - internal_offset;
+ if (read_from_sources (_sources, _length, mixdown_buffer, position, to_read, chan_n) != to_read) {
+ return 0;
+ }
- _fade_out->curve().get_vector (curve_offset, curve_offset+fo_limit, gain_buffer, fo_limit);
+ /* APPLY REGULAR GAIN CURVES AND SCALING TO mixdown_buffer */
+
+ if (envelope_active()) {
+ _envelope->curve().get_vector (internal_offset, internal_offset + to_read, gain_buffer, to_read);
- for (nframes_t n = 0, m = fade_offset; n < fo_limit; ++n, ++m) {
- mixdown_buffer[m] *= gain_buffer[n];
+ if (_scale_amplitude != 1.0f) {
+ for (framecnt_t n = 0; n < to_read; ++n) {
+ mixdown_buffer[n] *= gain_buffer[n] * _scale_amplitude;
+ }
+ } else {
+ for (framecnt_t n = 0; n < to_read; ++n) {
+ mixdown_buffer[n] *= gain_buffer[n];
+ }
+ }
+ } else if (_scale_amplitude != 1.0f) {
+ apply_gain_to_buffer (mixdown_buffer, to_read, _scale_amplitude);
+ }
+
+ /* APPLY FADES TO THE DATA IN mixdown_buffer AND MIX THE RESULTS INTO
+ * buf. The key things to realize here: (1) the fade being applied is
+ * (as of April 26th 2012) just the inverse of the fade in curve (2)
+ * "buf" contains data from lower regions already. So this operation
+ * fades out the existing material.
+ */
+
+ if (fade_in_limit != 0) {
+
+ if (opaque()) {
+ if (_inverse_fade_in) {
+
+
+ /* explicit inverse fade in curve (e.g. for constant
+ * power), so we have to fetch it.
+ */
+
+ _inverse_fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit);
+
+ /* Fade the data from lower layers out */
+ for (framecnt_t n = 0; n < fade_in_limit; ++n) {
+ buf[n] *= gain_buffer[n];
+ }
+
+ /* refill gain buffer with the fade in */
+
+ _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit);
+
+ } else {
+
+ /* no explicit inverse fade in, so just use (1 - fade
+ * in) for the fade out of lower layers
+ */
+
+ _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit);
+
+ for (framecnt_t n = 0; n < fade_in_limit; ++n) {
+ buf[n] *= 1 - gain_buffer[n];
}
}
+ } else {
+ _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit);
+ }
+
+ /* Mix our newly-read data in, with the fade */
+ for (framecnt_t n = 0; n < fade_in_limit; ++n) {
+ buf[n] += mixdown_buffer[n] * gain_buffer[n];
}
}
- /* Regular gain curves and scaling */
+ if (fade_out_limit != 0) {
- if ((rops & ReadOpsOwnAutomation) && envelope_active()) {
- _envelope->curve().get_vector (internal_offset, internal_offset + to_read, gain_buffer, to_read);
+ framecnt_t const curve_offset = fade_interval_start - (_length - _fade_out->back()->when);
- if ((rops & ReadOpsOwnScaling) && _scale_amplitude != 1.0f) {
- for (nframes_t n = 0; n < to_read; ++n) {
- mixdown_buffer[n] *= gain_buffer[n] * _scale_amplitude;
+ if (opaque()) {
+ if (_inverse_fade_out) {
+
+ _inverse_fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit);
+
+ /* Fade the data from lower levels in */
+ for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) {
+ buf[m] *= gain_buffer[n];
+ }
+
+ /* fetch the actual fade out */
+
+ _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit);
+
+ } else {
+
+ /* no explicit inverse fade out (which is
+ * actually a fade in), so just use (1 - fade
+ * out) for the fade in of lower layers
+ */
+
+ _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit);
+
+ for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) {
+ buf[m] *= 1 - gain_buffer[n];
+ }
}
} else {
- for (nframes_t n = 0; n < to_read; ++n) {
- mixdown_buffer[n] *= gain_buffer[n];
- }
+ _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit);
}
- } else if ((rops & ReadOpsOwnScaling) && _scale_amplitude != 1.0f) {
- // XXX this should be using what in 2.0 would have been:
- // Session::apply_gain_to_buffer (mixdown_buffer, to_read, _scale_amplitude);
+ /* Mix our newly-read data with whatever was already there,
+ with the fade out applied to our data.
+ */
+ for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) {
+ buf[m] += mixdown_buffer[m] * gain_buffer[n];
+ }
+ }
+
+ /* MIX OR COPY THE REGION BODY FROM mixdown_buffer INTO buf */
- for (nframes_t n = 0; n < to_read; ++n) {
- mixdown_buffer[n] *= _scale_amplitude;
+ framecnt_t const N = to_read - fade_in_limit - fade_out_limit;
+ if (N > 0) {
+ if (opaque ()) {
+ memcpy (buf + fade_in_limit, mixdown_buffer + fade_in_limit, N * sizeof (Sample));
+ } else {
+ mix_buffers_no_gain (buf + fade_in_limit, mixdown_buffer + fade_in_limit, N);
}
}
- if (!opaque() && (buf != mixdown_buffer)) {
+ return to_read;
+}
- /* gack. the things we do for users.
- */
+/** Read data directly from one of our sources, accounting for the situation when the track has a different channel
+ * count to the region.
+ *
+ * @param srcs Source list to get our source from.
+ * @param limit Furthest that we should read, as an offset from the region position.
+ * @param buf Buffer to write data into (existing contents of the buffer will be overwritten)
+ * @param position Position to read from, in session frames.
+ * @param cnt Number of frames to read.
+ * @param chan_n Channel to read from.
+ * @return Number of frames read.
+ */
+
+framecnt_t
+AudioRegion::read_from_sources (SourceList const & srcs, framecnt_t limit, Sample* buf, framepos_t position, framecnt_t cnt, uint32_t chan_n) const
+{
+	frameoffset_t const internal_offset = position - _position;
+	if (internal_offset >= limit) {
+		return 0;
+	}
+
+	framecnt_t const to_read = min (cnt, limit - internal_offset);
+	if (to_read == 0) {
+		return 0;
+	}
+
+	if (chan_n < n_channels()) {
+
+		boost::shared_ptr<AudioSource> src = boost::dynamic_pointer_cast<AudioSource> (srcs[chan_n]);
+		if (src->read (buf, _start + internal_offset, to_read) != to_read) {
+			return 0; /* "read nothing" */
+		}
+
+	} else {
+
+		/* track is N-channel, this region has fewer channels; silence the ones
+		   we don't have.
+		*/
+
+		if (Config->get_replicate_missing_region_channels()) {
+			/* track is N-channel, this region has less channels, so use a relevant channel
+			 */
-			buf += buf_offset;
+			/* map the requested track channel onto one of ours.
+			   This must be chan_n % n_channels(): the reversed form
+			   n_channels() % chan_n can evaluate to n_channels()
+			   (e.g. 2 % 3 == 2), indexing past the end of srcs.
+			*/
+			uint32_t channel = chan_n % n_channels();
+			boost::shared_ptr<AudioSource> src = boost::dynamic_pointer_cast<AudioSource> (srcs[channel]);
-			for (nframes_t n = 0; n < to_read; ++n) {
-				buf[n] += mixdown_buffer[n];
+			if (src->read (buf, _start + internal_offset, to_read) != to_read) {
+				return 0; /* "read nothing" */
+			}
+		}
}
child = node.add_child ("Envelope");
bool default_env = false;
-
+
// If there are only two points, the points are in the start of the region and the end of the region
// so, if they are both at 1.0f, that means the default region.
-
+
if (_envelope->size() == 2 &&
_envelope->front()->value == 1.0f &&
_envelope->back()->value==1.0f) {
default_env = true;
}
}
-
+
if (default_env) {
child->add_property ("default", "yes");
} else {
child->add_child_nocopy (_fade_in->get_state ());
}
+ if (_inverse_fade_in) {
+ child = node.add_child (X_("InvFadeIn"));
+ child->add_child_nocopy (_inverse_fade_in->get_state ());
+ }
+
child = node.add_child (X_("FadeOut"));
if (_default_fade_out) {
child->add_child_nocopy (_fade_out->get_state ());
}
+ if (_inverse_fade_out) {
+ child = node.add_child (X_("InvFadeOut"));
+ child->add_child_nocopy (_inverse_fade_out->get_state ());
+ }
+
return node;
}
const XMLNodeList& nlist = node.children();
const XMLProperty *prop;
LocaleGuard lg (X_("POSIX"));
- boost::shared_ptr<Playlist> the_playlist (_playlist.lock());
+ boost::shared_ptr<Playlist> the_playlist (_playlist.lock());
suspend_property_changes ();
/* Now find envelope description and other related child items */
- _envelope->freeze ();
+ _envelope->freeze ();
for (XMLNodeConstIterator niter = nlist.begin(); niter != nlist.end(); ++niter) {
XMLNode *child;
set_default_envelope ();
}
- _envelope->set_max_xval (_length);
_envelope->truncate_end (_length);
_fade_in->clear ();
- if ((prop = child->property ("default")) != 0 || (prop = child->property ("steepness")) != 0) {
+ if (((prop = child->property ("default")) != 0 && string_is_affirmative (prop->value())) || (prop = child->property ("steepness")) != 0) {
set_default_fade_in ();
} else {
XMLNode* grandchild = child->child ("AutomationList");
}
}
+ /* legacy a3 */
+
+ if ((prop = child->property ("is-xfade")) != 0) {
+ _fade_in_is_xfade = string_is_affirmative (prop->value());
+ }
+
} else if (child->name() == "FadeOut") {
_fade_out->clear ();
- if ((prop = child->property ("default")) != 0 || (prop = child->property ("steepness")) != 0) {
+ if (((prop = child->property ("default")) != 0 && (string_is_affirmative (prop->value()))) || (prop = child->property ("steepness")) != 0) {
set_default_fade_out ();
} else {
XMLNode* grandchild = child->child ("AutomationList");
_fade_out->set_state (*grandchild, version);
}
}
-
+
if ((prop = child->property ("active")) != 0) {
if (string_is_affirmative (prop->value())) {
set_fade_out_active (true);
}
}
+ /* legacy a3 */
+
+ if ((prop = child->property ("is-xfade")) != 0) {
+ _fade_out_is_xfade = string_is_affirmative (prop->value());
+ }
+
+ } else if (child->name() == "InvFadeIn") {
+ XMLNode* grandchild = child->child ("AutomationList");
+ if (grandchild) {
+ _inverse_fade_in->set_state (*grandchild, version);
+ }
+ } else if (child->name() == "InvFadeOut") {
+ XMLNode* grandchild = child->child ("AutomationList");
+ if (grandchild) {
+ _inverse_fade_out->set_state (*grandchild, version);
+ }
}
}
- _envelope->thaw ();
+ _envelope->thaw ();
resume_property_changes ();
-
+
if (send) {
send_change (what_changed);
}
void
AudioRegion::set_fade_in_shape (FadeShape shape)
{
+	/* change only the shape: regenerate the fade-in at its current length */
-	set_fade_in (shape, (nframes_t) _fade_in->back()->when);
+	set_fade_in (shape, (framecnt_t) _fade_in->back()->when);
}
void
AudioRegion::set_fade_out_shape (FadeShape shape)
{
+	/* change only the shape: regenerate the fade-out at its current length */
-	set_fade_out (shape, (nframes_t) _fade_out->back()->when);
+	set_fade_out (shape, (framecnt_t) _fade_out->back()->when);
}
void
AudioRegion::set_fade_in (boost::shared_ptr<AutomationList> f)
{
+	/* install an externally-supplied fade-in curve; once set explicitly,
+	   this is no longer the region's default fade */
-	_fade_in->freeze ();
+	_fade_in->freeze ();
	*_fade_in = *f;
-	_fade_in->thaw ();
-	
+	_fade_in->thaw ();
+	_default_fade_in = false;
+
	send_change (PropertyChange (Properties::fade_in));
}
+/** Generate a fade-in of the given shape over @a len frames.
+ *  Also rebuilds _inverse_fade_in, the complementary curve (apparently used
+ *  as the other half of a crossfade -- see generate_inverse_power_curve,
+ *  which produces sqrt(1 - v^2) for constant power).
+ */
void
AudioRegion::set_fade_in (FadeShape shape, framecnt_t len)
{
-	_fade_in->freeze ();
+	/* scratch curves used while shaping */
+	boost::shared_ptr<Evoral::ControlList> c1 (new Evoral::ControlList (FadeInAutomation));
+	boost::shared_ptr<Evoral::ControlList> c2 (new Evoral::ControlList (FadeInAutomation));
+	boost::shared_ptr<Evoral::ControlList> c3 (new Evoral::ControlList (FadeInAutomation));
+
+	_fade_in->freeze ();
	_fade_in->clear ();
+	_inverse_fade_in->clear ();
	switch (shape) {
	case FadeLinear:
		_fade_in->fast_simple_add (0.0, 0.0);
		_fade_in->fast_simple_add (len, 1.0);
+		reverse_curve (_inverse_fade_in, _fade_in);
		break;
	case FadeFast:
-		_fade_in->fast_simple_add (0, 0);
-		_fade_in->fast_simple_add (len * 0.389401, 0.0333333);
-		_fade_in->fast_simple_add (len * 0.629032, 0.0861111);
-		_fade_in->fast_simple_add (len * 0.829493, 0.233333);
-		_fade_in->fast_simple_add (len * 0.9447, 0.483333);
-		_fade_in->fast_simple_add (len * 0.976959, 0.697222);
-		_fade_in->fast_simple_add (len, 1);
+		/* a -60dB fade-out curve, reversed to become a fade-in */
+		generate_db_fade (_fade_in, len, 10, -60);
+		reverse_curve (c1, _fade_in);
+		_fade_in->copy_events (*c1);
+		generate_inverse_power_curve (_inverse_fade_in, _fade_in);
		break;
	case FadeSlow:
-		_fade_in->fast_simple_add (0, 0);
-		_fade_in->fast_simple_add (len * 0.0207373, 0.197222);
-		_fade_in->fast_simple_add (len * 0.0645161, 0.525);
-		_fade_in->fast_simple_add (len * 0.152074, 0.802778);
-		_fade_in->fast_simple_add (len * 0.276498, 0.919444);
-		_fade_in->fast_simple_add (len * 0.481567, 0.980556);
-		_fade_in->fast_simple_add (len * 0.767281, 1);
-		_fade_in->fast_simple_add (len, 1);
+		generate_db_fade (c1, len, 10, -1);  // start off with a slow fade
+		generate_db_fade (c2, len, 10, -80); // end with a fast fade
+		merge_curves (_fade_in, c1, c2);
+		reverse_curve (c3, _fade_in);
+		_fade_in->copy_events (*c3);
+		generate_inverse_power_curve (_inverse_fade_in, _fade_in);
		break;
-	case FadeLogA:
-		_fade_in->fast_simple_add (0, 0);
-		_fade_in->fast_simple_add (len * 0.0737327, 0.308333);
-		_fade_in->fast_simple_add (len * 0.246544, 0.658333);
-		_fade_in->fast_simple_add (len * 0.470046, 0.886111);
-		_fade_in->fast_simple_add (len * 0.652074, 0.972222);
-		_fade_in->fast_simple_add (len * 0.771889, 0.988889);
-		_fade_in->fast_simple_add (len, 1);
+	case FadeConstantPower:
+		/* sin() ramp; gain starts at 0 (i = 0 => sin(0)) and ends at 1 */
+		for (int i = 0; i < 9; ++i) {
+			float dist = (float) i / 10.0f;
+			_fade_in->fast_simple_add (len*dist, sin (dist*M_PI/2));
+		}
+		_fade_in->fast_simple_add (len, 1.0);
+		reverse_curve (_inverse_fade_in, _fade_in);
		break;
-		
-	case FadeLogB:
-		_fade_in->fast_simple_add (0, 0);
-		_fade_in->fast_simple_add (len * 0.304147, 0.0694444);
-		_fade_in->fast_simple_add (len * 0.529954, 0.152778);
-		_fade_in->fast_simple_add (len * 0.725806, 0.333333);
-		_fade_in->fast_simple_add (len * 0.847926, 0.558333);
-		_fade_in->fast_simple_add (len * 0.919355, 0.730556);
-		_fade_in->fast_simple_add (len, 1);
+
+	case FadeSymmetric:
+		//start with a nearly linear curve
+		_fade_in->fast_simple_add (0, 1);
+		_fade_in->fast_simple_add (0.5*len, 0.6);
+		//now generate a fade-out curve by successively applying a gain drop
+		//(it is reversed below to become the fade-in)
+		const float breakpoint = 0.7;  //linear for first 70%
+		const int num_steps = 9;
+		for (int i = 2; i < num_steps; i++) {
+			float coeff = (1.0-breakpoint);
+			for (int j = 0; j < i; j++) {
+				coeff *= 0.5;  //6dB drop per step
+			}
+			_fade_in->fast_simple_add (len* (breakpoint+((1.0-breakpoint)*(double)i/(double)num_steps)), coeff);
+		}
+		_fade_in->fast_simple_add (len, VERY_SMALL_SIGNAL);
+		reverse_curve (c3, _fade_in);
+		_fade_in->copy_events (*c3);
+		reverse_curve (_inverse_fade_in, _fade_in );
		break;
	}
-	_fade_in->thaw ();
+	_default_fade_in = false;
+	_fade_in->thaw ();
	send_change (PropertyChange (Properties::fade_in));
}
void
AudioRegion::set_fade_out (boost::shared_ptr<AutomationList> f)
{
+	/* install an externally-supplied fade-out curve; once set explicitly,
+	   this is no longer the region's default fade */
-	_fade_out->freeze ();
+	_fade_out->freeze ();
	*_fade_out = *f;
-	_fade_out->thaw ();
+	_fade_out->thaw ();
+	_default_fade_out = false;
+
+	/* NOTE(review): this signals Properties::fade_in even though the
+	   fade-OUT changed; the FadeShape overload below sends fade_out.
+	   Looks like a copy-paste slip -- confirm before relying on it.
+	   (Left untouched here: it is a context line of this patch.) */
	send_change (PropertyChange (Properties::fade_in));
}
+/** Generate a fade-out of the given shape over @a len frames.
+ *  Also rebuilds _inverse_fade_out, the complementary curve (apparently used
+ *  as the other half of a crossfade).
+ */
void
AudioRegion::set_fade_out (FadeShape shape, framecnt_t len)
{
-	_fade_out->freeze ();
+	/* scratch curves used while shaping */
+	boost::shared_ptr<Evoral::ControlList> c1 (new Evoral::ControlList (FadeOutAutomation));
+	boost::shared_ptr<Evoral::ControlList> c2 (new Evoral::ControlList (FadeOutAutomation));
+
+	_fade_out->freeze ();
	_fade_out->clear ();
+	_inverse_fade_out->clear ();
	switch (shape) {
-	case FadeFast:
-		_fade_out->fast_simple_add (len * 0, 1);
-		_fade_out->fast_simple_add (len * 0.023041, 0.697222);
-		_fade_out->fast_simple_add (len * 0.0553, 0.483333);
-		_fade_out->fast_simple_add (len * 0.170507, 0.233333);
-		_fade_out->fast_simple_add (len * 0.370968, 0.0861111);
-		_fade_out->fast_simple_add (len * 0.610599, 0.0333333);
-		_fade_out->fast_simple_add (len * 1, 0);
+	case FadeLinear:
+		/* ends at VERY_SMALL_SIGNAL (-140dB) rather than 0 */
+		_fade_out->fast_simple_add (0.0, 1.0);
+		_fade_out->fast_simple_add (len, VERY_SMALL_SIGNAL);
+		reverse_curve (_inverse_fade_out, _fade_out);
		break;
-		
-	case FadeLogA:
-		_fade_out->fast_simple_add (len * 0, 1);
-		_fade_out->fast_simple_add (len * 0.228111, 0.988889);
-		_fade_out->fast_simple_add (len * 0.347926, 0.972222);
-		_fade_out->fast_simple_add (len * 0.529954, 0.886111);
-		_fade_out->fast_simple_add (len * 0.753456, 0.658333);
-		_fade_out->fast_simple_add (len * 0.9262673, 0.308333);
-		_fade_out->fast_simple_add (len * 1, 0);
+
+	case FadeFast:
+		generate_db_fade (_fade_out, len, 10, -60);
+		generate_inverse_power_curve (_inverse_fade_out, _fade_out);
		break;
-		
-	case FadeSlow:
-		_fade_out->fast_simple_add (len * 0, 1);
-		_fade_out->fast_simple_add (len * 0.305556, 1);
-		_fade_out->fast_simple_add (len * 0.548611, 0.991736);
-		_fade_out->fast_simple_add (len * 0.759259, 0.931129);
-		_fade_out->fast_simple_add (len * 0.918981, 0.68595);
-		_fade_out->fast_simple_add (len * 0.976852, 0.22865);
-		_fade_out->fast_simple_add (len * 1, 0);
+
+	case FadeSlow:
+		generate_db_fade (c1, len, 10, -1);  //start off with a slow fade
+		generate_db_fade (c2, len, 10, -80); //end with a fast fade
+		merge_curves (_fade_out, c1, c2);
+		generate_inverse_power_curve (_inverse_fade_out, _fade_out);
		break;
-	case FadeLogB:
-		_fade_out->fast_simple_add (len * 0, 1);
-		_fade_out->fast_simple_add (len * 0.080645, 0.730556);
-		_fade_out->fast_simple_add (len * 0.277778, 0.289256);
-		_fade_out->fast_simple_add (len * 0.470046, 0.152778);
-		_fade_out->fast_simple_add (len * 0.695853, 0.0694444);
-		_fade_out->fast_simple_add (len * 1, 0);
+	case FadeConstantPower:
+		//constant-power fades use a sin/cos relationship
+		//the cutoff is abrupt but it has the benefit of being symmetrical
+		_fade_out->fast_simple_add (0.0, 1.0);
+		for (int i = 1; i < 9; i++ ) {
+			float dist = (float)i/10.0;
+			_fade_out->fast_simple_add ((len * dist), cos(dist*M_PI/2));
+		}
+		_fade_out->fast_simple_add (len, VERY_SMALL_SIGNAL);
+		reverse_curve (_inverse_fade_out, _fade_out);
		break;
-		
-	case FadeLinear:
-		_fade_out->fast_simple_add (len * 0, 1);
-		_fade_out->fast_simple_add (len * 1, 0);
+
+	case FadeSymmetric:
+		//start with a nearly linear curve
+		_fade_out->fast_simple_add (0, 1);
+		_fade_out->fast_simple_add (0.5*len, 0.6);
+
+		//now generate a fade-out curve by successively applying a gain drop
+		const float breakpoint = 0.7;  //linear for first 70%
+		const int num_steps = 9;
+		for (int i = 2; i < num_steps; i++) {
+			float coeff = (1.0-breakpoint);
+			for (int j = 0; j < i; j++) {
+				coeff *= 0.5;  //6dB drop per step
+			}
+			_fade_out->fast_simple_add (len* (breakpoint+((1.0-breakpoint)*(double)i/(double)num_steps)), coeff);
+		}
+		_fade_out->fast_simple_add (len, VERY_SMALL_SIGNAL);
+		reverse_curve (_inverse_fade_out, _fade_out);
		break;
	}
-	_fade_out->thaw ();
-	send_change (PropertyChange (Properties::fade_in));
+	_default_fade_out = false;
+	_fade_out->thaw ();
+	/* this also fixes the old code's bug of signalling fade_in here */
+	send_change (PropertyChange (Properties::fade_out));
}
void
if (len > _length) {
len = _length - 1;
}
+
+ if (len < 64) {
+ len = 64;
+ }
bool changed = _fade_in->extend_to (len);
if (changed) {
+ if (_inverse_fade_in) {
+ _inverse_fade_in->extend_to (len);
+ }
+
+ if (_session.config.get_xfade_model() == FullCrossfade &&
+ _session.config.get_auto_xfade() &&
+ _fade_in_is_xfade && !_fade_in_is_short) {
+
+ /* trim a single other region below us to the new start
+ of the fade.
+ */
+
+ boost::shared_ptr<Region> other = get_single_other_xfade_region (true);
+ if (other) {
+ other->trim_end (position() + len);
+ }
+ }
+
_default_fade_in = false;
send_change (PropertyChange (Properties::fade_in));
}
len = _length - 1;
}
+ if (len < 64) {
+ len = 64;
+ }
+
bool changed = _fade_out->extend_to (len);
if (changed) {
+
+ if (_inverse_fade_out) {
+ _inverse_fade_out->extend_to (len);
+ }
_default_fade_out = false;
+
+ if (_session.config.get_xfade_model() == FullCrossfade &&
+ _session.config.get_auto_xfade() &&
+ _fade_out_is_xfade && !_fade_out_is_short) {
+
+ /* trim a single other region below us to the new start
+ of the fade.
+ */
+
+ boost::shared_ptr<Region> other = get_single_other_xfade_region (false);
+ if (other) {
+ other->trim_front (last_frame() - len);
+ }
+ }
+
send_change (PropertyChange (Properties::fade_out));
}
}
AudioRegion::set_default_fade_in ()
{
+	/* the default fade is a short (64-frame) linear ramp, not a crossfade */
	_fade_in_suspended = 0;
+	_fade_in_is_xfade = false;
+	_fade_in_is_short = true;
	set_fade_in (FadeLinear, 64);
}
AudioRegion::set_default_fade_out ()
{
+	/* the default fade is a short (64-frame) linear ramp, not a crossfade */
	_fade_out_suspended = 0;
+	_fade_out_is_xfade = false;
+	_fade_out_is_short = true;
	set_fade_out (FadeLinear, 64);
}
void
AudioRegion::set_default_envelope ()
{
+	/* flat unity-gain envelope spanning the whole region */
-	_envelope->freeze ();
+	_envelope->freeze ();
	_envelope->clear ();
	_envelope->fast_simple_add (0, 1.0f);
	_envelope->fast_simple_add (_length, 1.0f);
-	_envelope->thaw ();
+	_envelope->thaw ();
}
void
based on the the existing curve.
*/
- _envelope->freeze ();
+ _envelope->freeze ();
_envelope->truncate_end (_length);
- _envelope->set_max_xval (_length);
- _envelope->thaw ();
-
+ _envelope->thaw ();
+
suspend_property_changes();
- if (_left_of_split) {
- set_default_fade_out ();
- _left_of_split = false;
- } else if (_fade_out->back()->when > _length) {
- _fade_out->extend_to (_length);
- send_change (PropertyChange (Properties::fade_out));
- }
-
+ if (_left_of_split) {
+ set_default_fade_out ();
+ _left_of_split = false;
+ } else if (_fade_out->back()->when > _length) {
+ _fade_out->extend_to (_length);
+ send_change (PropertyChange (Properties::fade_out));
+ }
+
if (_fade_in->back()->when > _length) {
_fade_in->extend_to (_length);
send_change (PropertyChange (Properties::fade_in));
}
-
+
resume_property_changes();
}
/* as above, but the shift was from the front */
_envelope->truncate_start (_length);
-
+
suspend_property_changes();
- if (_right_of_split) {
- set_default_fade_in ();
- _right_of_split = false;
- } else if (_fade_in->back()->when > _length) {
+ if (_right_of_split) {
+ set_default_fade_in ();
+ _right_of_split = false;
+ } else if (_fade_in->back()->when > _length) {
_fade_in->extend_to (_length);
send_change (PropertyChange (Properties::fade_in));
}
_fade_out->extend_to (_length);
send_change (PropertyChange (Properties::fade_out));
}
-
+
resume_property_changes();
}
*/
PropertyList plist;
-
+
plist.add (Properties::start, _start.val());
plist.add (Properties::length, _length.val());
plist.add (Properties::name, new_name);
- plist.add (Properties::layer, _layer.val());
+ plist.add (Properties::layer, layer ());
v.push_back(RegionFactory::create (srcs, plist));
v.back()->set_whole_file (false);
+/** Read straight from one of our sources, bypassing fades, envelope and
+ *  amplitude scaling.
+ *  @param channel index of the source channel to read. The new code uses it
+ *  to select the source (audio_source(channel)); the removed code passed it
+ *  through to a single source's read() instead.
+ *  @return number of frames actually read.
+ */
framecnt_t
AudioRegion::read_raw_internal (Sample* buf, framepos_t pos, framecnt_t cnt, int channel) const
{
-	return audio_source()->read (buf, pos, cnt, channel);
-}
-
-int
-AudioRegion::exportme (Session& /*session*/, ARDOUR::ExportSpecification& /*spec*/)
-{
-	// TODO EXPORT
-//	const nframes_t blocksize = 4096;
-//	nframes_t to_read;
-//	int status = -1;
-//
-//	spec.channels = _sources.size();
-//
-//	if (spec.prepare (blocksize, session.frame_rate())) {
-//		goto out;
-//	}
-//
-//	spec.pos = 0;
-//	spec.total_frames = _length;
-//
-//	while (spec.pos < _length && !spec.stop) {
-//
-//
-//		/* step 1: interleave */
-//
-//		to_read = min (_length - spec.pos, blocksize);
-//
-//		if (spec.channels == 1) {
-//
-//			if (read_raw_internal (spec.dataF, _start + spec.pos, to_read) != to_read) {
-//				goto out;
-//			}
-//
-//		} else {
-//
-//			Sample buf[blocksize];
-//
-//			for (uint32_t chan = 0; chan < spec.channels; ++chan) {
-//
-//				if (audio_source(chan)->read (buf, _start + spec.pos, to_read) != to_read) {
-//					goto out;
-//				}
-//
-//				for (nframes_t x = 0; x < to_read; ++x) {
-//					spec.dataF[chan+(x*spec.channels)] = buf[x];
-//				}
-//			}
-//		}
-//
-//		if (spec.process (to_read)) {
-//			goto out;
-//		}
-//
-//		spec.pos += to_read;
-//		spec.progress = (double) spec.pos /_length;
-//
-//	}
-//
-//	status = 0;
-//
-//  out:
-//	spec.running = false;
-//	spec.status = status;
-//	spec.clear();
-//
-//	return status;
-	return 0;
+	return audio_source(channel)->read (buf, pos, cnt);
}
void
framecnt_t const blocksize = 64 * 1024;
Sample buf[blocksize];
-
+
while (fpos < fend) {
uint32_t n;
/* read it in */
- if (read_raw_internal (buf, fpos, to_read, 0) != to_read) {
+ if (read_raw_internal (buf, fpos, to_read, n) != to_read) {
return 0;
}
}
fpos += to_read;
- p->set_progress (float (fpos - _start) / _length);
- if (p->cancelled ()) {
- return -1;
+ if (p) {
+ p->set_progress (float (fpos - _start) / _length);
+ if (p->cancelled ()) {
+ return -1;
+ }
}
}
if (afs && afs->destructive()) {
// set_start (source()->natural_position(), this);
- set_position (source()->natural_position(), this);
+ set_position (source()->natural_position());
}
}
return boost::dynamic_pointer_cast<AudioSource>(source(n));
}
-int
+int
AudioRegion::adjust_transients (frameoffset_t delta)
{
+	/* shift every cached transient position by delta frames and notify */
	for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) {
		(*x) = (*x) + delta;
	}
-	
+
	send_change (PropertyChange (Properties::valid_transients));
-	
-	return 0;
-}
+
+	return 0;
+}
int
AudioRegion::update_transient (framepos_t old_position, framepos_t new_position)
if ((*x) == old_position) {
(*x) = new_position;
send_change (PropertyChange (Properties::valid_transients));
-
+
break;
}
}
-
+
return 0;
}
{
_transients.push_back(where);
_valid_transients = true;
-
+
send_change (PropertyChange (Properties::valid_transients));
}
{
_transients.remove(where);
_valid_transients = true;
-
+
send_change (PropertyChange (Properties::valid_transients));
}
_transients.clear();
_transients = results;
_valid_transients = true;
-
+
send_change (PropertyChange (Properties::valid_transients));
-
+
return 0;
}
/* no existing/complete transient info */
- static bool analyse_dialog_shown = false; /* global per instance of Ardour */
+ static bool analyse_dialog_shown = false; /* global per instance of Ardour */
if (!Config->get_auto_analyse_audio()) {
- if (!analyse_dialog_shown) {
- pl->session().Dialog (_("\
+ if (!analyse_dialog_shown) {
+ pl->session().Dialog (_("\
You have requested an operation that requires audio analysis.\n\n\
You currently have \"auto-analyse-audio\" disabled, which means \
that transient data must be generated every time it is required.\n\n\
This dialog will not display again. But you may notice a slight delay \
in this and future transient-detection operations.\n\
"));
- analyse_dialog_shown = true;
- }
+ analyse_dialog_shown = true;
+ }
}
TransientDetector t (pl->session().frame_rate());
*
* @param threshold Threshold below which signal is considered silence (as a sample value)
* @param min_length Minimum length of silent period to be reported.
- * @return Silent periods; first of pair is the offset within the region, second is the length of the period
+ * @return Silent intervals, measured relative to the region start in the source
*/
-std::list<std::pair<frameoffset_t, framecnt_t> >
+AudioIntervalResult
AudioRegion::find_silence (Sample threshold, framecnt_t min_length, InterThreadInfo& itt) const
{
	framecnt_t const block_size = 64 * 1024;
-	Sample loudest[block_size];
-	Sample buf[block_size];
+	/* heap-allocated scratch buffers (scoped_array frees them on exit);
+	   avoids placing two block_size-sample arrays on the stack */
+	boost::scoped_array<Sample> loudest (new Sample[block_size]);
+	boost::scoped_array<Sample> buf (new Sample[block_size]);
	framepos_t pos = _start;
	framepos_t const end = _start + _length - 1;
-	std::list<std::pair<frameoffset_t, framecnt_t> > silent_periods;
+	AudioIntervalResult silent_periods;
	bool in_silence = false;
	frameoffset_t silence_start = 0;
-	bool silence;
	while (pos < end && !itt.cancel) {
		/* fill `loudest' with the loudest absolute sample at each instant, across all channels */
-		memset (loudest, 0, sizeof (Sample) * block_size);
+		memset (loudest.get(), 0, sizeof (Sample) * block_size);
		for (uint32_t n = 0; n < n_channels(); ++n) {
-			read_raw_internal (buf, pos, block_size, n);
+			read_raw_internal (buf.get(), pos, block_size, n);
			for (framecnt_t i = 0; i < block_size; ++i) {
				loudest[i] = max (loudest[i], abs (buf[i]));
			}
	/* now look for silence */
	for (framecnt_t i = 0; i < block_size; ++i) {
-		silence = abs (loudest[i]) < threshold;
+		/* declared in the loop now: silence has no meaning across blocks */
+		bool const silence = abs (loudest[i]) < threshold;
		if (silence && !in_silence) {
			/* non-silence to silence */
			in_silence = true;
		}
		pos += block_size;
-		itt.progress = (end-pos)/(double)_length;
+		itt.progress = (end-pos)/(double)_length;
	}
	if (in_silence && end - 1 - silence_start >= min_length) {
		silent_periods.push_back (std::make_pair (silence_start, end));
	}
-	itt.done = true;
+	itt.done = true;
	return silent_periods;
}
+/** @return the frame range between the end of our fade-in and the start of
+ *  our fade-out, i.e. the part of the region at full fade gain.
+ */
+Evoral::Range<framepos_t>
+AudioRegion::body_range () const
+{
+	return Evoral::Range<framepos_t> (first_frame() + _fade_in->back()->when, last_frame() - _fade_out->back()->when);
+}
+
+void
+AudioRegion::set_fade_in_is_xfade (bool yn)
+{
+	/* early-out when unchanged so listeners are not notified redundantly */
+	if (yn == _fade_in_is_xfade) {
+		return;
+	}
+
+	_fade_in_is_xfade = yn;
+	send_change (PropertyChange (Properties::fade_in_is_xfade));
+}
+
+void
+AudioRegion::set_fade_out_is_xfade (bool yn)
+{
+	/* early-out when unchanged so listeners are not notified redundantly */
+	if (yn == _fade_out_is_xfade) {
+		return;
+	}
+
+	_fade_out_is_xfade = yn;
+	send_change (PropertyChange (Properties::fade_out_is_xfade));
+}
+
+void
+AudioRegion::set_fade_in_is_short (bool yn)
+{
+	/* early-out when unchanged so listeners are not notified redundantly */
+	if (yn == _fade_in_is_short) {
+		return;
+	}
+
+	_fade_in_is_short = yn;
+	send_change (PropertyChange (Properties::fade_in_is_short));
+
+}
+
+void
+AudioRegion::set_fade_out_is_short (bool yn)
+{
+	/* early-out when unchanged so listeners are not notified redundantly */
+	if (yn == _fade_out_is_short) {
+		return;
+	}
+
+	_fade_out_is_short = yn;
+	send_change (PropertyChange (Properties::fade_out_is_short));
+}
+
+/** Find the one other region stacked with us at one of our edges.
+ *  @param start true to look at the regions under our start edge,
+ *  false to look under our end edge.
+ *  @return the single other region there, or a null pointer when we are
+ *  not in a playlist or when anything other than exactly two regions
+ *  (us + one other) overlap that point.
+ */
+boost::shared_ptr<Region>
+AudioRegion::get_single_other_xfade_region (bool start) const
+{
+	boost::shared_ptr<Playlist> pl (playlist());
+
+	if (!pl) {
+		/* not currently in a playlist - xfade length is unbounded
+		   (and irrelevant)
+		*/
+		return boost::shared_ptr<AudioRegion> ();
+	}
+
+	boost::shared_ptr<RegionList> rl;
+
+	if (start) {
+		rl = pl->regions_at (position());
+	} else {
+		rl = pl->regions_at (last_frame());
+	}
+
+	RegionList::iterator i;
+	boost::shared_ptr<Region> other;
+	uint32_t n = 0;
+
+	/* count and find the other region in a single pass through the list */
+
+	for (i = rl->begin(); i != rl->end(); ++i) {
+		if ((*i).get() != this) {
+			other = *i;
+		}
+		/* n counts every region at this point, including ourselves */
+		++n;
+	}
+
+	if (n != 2) {
+		/* zero or multiple regions stacked here - don't care about xfades */
+		return boost::shared_ptr<AudioRegion> ();
+	}
+
+	return other;
+}
+
+/** Clamp a proposed crossfade length to what is actually achievable here.
+ *  @param len proposed xfade length in frames.
+ *  @param start true for an xfade at our start edge, false for our end edge.
+ *  @return the legal length: never more than our own length, and when a
+ *  single overlapping region exists, never more than the maximum possible
+ *  overlap with it.
+ */
+framecnt_t
+AudioRegion::verify_xfade_bounds (framecnt_t len, bool start)
+{
+	/* this is called from a UI to check on whether a new proposed
+	   length for an xfade is legal or not. it returns the legal
+	   length corresponding to @a len which may be shorter than or
+	   equal to @a len itself.
+	*/
+
+	boost::shared_ptr<Region> other = get_single_other_xfade_region (start);
+	framecnt_t maxlen;
+
+	if (!other) {
+		/* zero or > 2 regions here, don't care about len, but
+		   it can't be longer than the region itself.
+		*/
+		return min (length(), len);
+	}
+
+	/* we overlap a single region. clamp the length of an xfade to
+	   the maximum possible duration of the overlap (if the other
+	   region were trimmed appropriately).
+	*/
+	if (start) {
+		maxlen = other->latest_possible_frame() - position();
+	} else {
+		maxlen = last_frame() - other->earliest_possible_position();
+	}
+
+	return min (length(), min (maxlen, len));
+
+}
extern "C" {