X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=libs%2Fardour%2Faudioregion.cc;h=6b549b3d13fc9c7ff7926d9652fd47fc1cf91cc0;hb=6739b6a1e31f943f039b3c1678190af4fe0f8d16;hp=9a9459f9b90435b36e44d6df7bd11e993423c811;hpb=861d6f81a3f455d6d2cce6b698507404db73d9bf;p=ardour.git

diff --git a/libs/ardour/audioregion.cc b/libs/ardour/audioregion.cc
index 9a9459f9b9..6b549b3d13 100644
--- a/libs/ardour/audioregion.cc
+++ b/libs/ardour/audioregion.cc
@@ -109,11 +109,9 @@ generate_db_fade (boost::shared_ptr<Evoral::ControlList> dst, double len, int nu
 	//generate a fade-out curve by successively applying a gain drop
 	float fade_speed = dB_to_coefficient(dB_drop / (float) num_steps);
+	float coeff = GAIN_COEFF_UNITY;
 	for (int i = 1; i < (num_steps-1); i++) {
-		float coeff = GAIN_COEFF_UNITY;
-		for (int j = 0; j < i; j++) {
-			coeff *= fade_speed;
-		}
+		coeff *= fade_speed;
 		dst->fast_simple_add (len*(double)i/(double)num_steps, coeff);
 	}
 
@@ -121,8 +119,8 @@ generate_db_fade (boost::shared_ptr<Evoral::ControlList> dst, double len, int nu
 }
 
 static void
-merge_curves (boost::shared_ptr<Evoral::ControlList> dst, 
-              boost::shared_ptr<const Evoral::ControlList> curve1, 
+merge_curves (boost::shared_ptr<Evoral::ControlList> dst,
+              boost::shared_ptr<const Evoral::ControlList> curve1,
               boost::shared_ptr<const Evoral::ControlList> curve2)
 {
 	Evoral::ControlList::EventList::size_type size = curve1->size();
@@ -131,13 +129,13 @@ merge_curves (boost::shared_ptr<Evoral::ControlList> dst,
 	if (size != curve2->size()) {
 		return;
 	}
-	
+
 	Evoral::ControlList::const_iterator c1 = curve1->begin();
 	int count = 0;
 	for (Evoral::ControlList::const_iterator c2 = curve2->begin(); c2!=curve2->end(); c2++ ) {
 		float v1 = accurate_coefficient_to_dB((*c1)->value);
 		float v2 = accurate_coefficient_to_dB((*c2)->value);
-		
+
 		double interp = v1 * ( 1.0-( (double)count / (double)size) );
 		interp += v2 * ( (double)count / (double)size );
 
@@ -422,7 +420,7 @@ AudioRegion::set_envelope_active (bool yn)
  * @param chan_n Channel.
  * @param frames_per_pixel Number of samples to use to generate one peak value.
  */
- 
+
 ARDOUR::framecnt_t
 AudioRegion::read_peaks (PeakData *buf, framecnt_t npeaks, framecnt_t offset, framecnt_t cnt, uint32_t chan_n, double frames_per_pixel) const
 {
@@ -432,7 +430,7 @@ AudioRegion::read_peaks (PeakData *buf, framecnt_t npeaks, framecnt_t offset, fr
 
 	if (audio_source(chan_n)->read_peaks (buf, npeaks, offset, cnt, frames_per_pixel)) {
 		return 0;
-	} 
+	}
 
 	if (_scale_amplitude != 1.0f) {
 		for (framecnt_t n = 0; n < npeaks; ++n) {
@@ -491,7 +489,7 @@ AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
 	 */
 
 	assert (cnt >= 0);
-	
+
 	if (n_channels() == 0) {
 		return 0;
 	}
@@ -521,29 +519,29 @@ AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
 	   of any fade out that we are dealing with
 	*/
 	frameoffset_t fade_out_offset = 0;
-	
+
 	/* Amount (length) of fade out that we are dealing with in this read
 	 */
 	framecnt_t fade_out_limit = 0;
 
 	framecnt_t fade_interval_start = 0;
 	/* Fade in */
-	
+
 	if (_fade_in_active && _session.config.get_use_region_fades()) {
-		
+
 		framecnt_t fade_in_length = (framecnt_t) _fade_in->back()->when;
 
 		/* see if this read is within the fade in */
-		
+
 		if (internal_offset < fade_in_length) {
 			fade_in_limit = min (to_read, fade_in_length - internal_offset);
 		}
 	}
-	
+
 	/* Fade out */
-	
+
 	if (_fade_out_active && _session.config.get_use_region_fades()) {
-		
+
 		/* see if some part of this read is within the fade out */

 		/* .................
>| REGION @@ -565,7 +563,7 @@ AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer, fade_interval_start = max (internal_offset, _length - framecnt_t (_fade_out->back()->when)); framecnt_t fade_interval_end = min(internal_offset + to_read, _length.val()); - + if (fade_interval_end > fade_interval_start) { /* (part of the) the fade out is in this buffer */ fade_out_limit = fade_interval_end - fade_interval_start; @@ -603,7 +601,7 @@ AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer, /* APPLY FADES TO THE DATA IN mixdown_buffer AND MIX THE RESULTS INTO * buf. The key things to realize here: (1) the fade being applied is - * (as of April 26th 2012) just the inverse of the fade in curve (2) + * (as of April 26th 2012) just the inverse of the fade in curve (2) * "buf" contains data from lower regions already. So this operation * fades out the existing material. */ @@ -616,26 +614,26 @@ AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer, /* explicit inverse fade in curve (e.g. for constant * power), so we have to fetch it. */ - + _inverse_fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit); - + /* Fade the data from lower layers out */ for (framecnt_t n = 0; n < fade_in_limit; ++n) { buf[n] *= gain_buffer[n]; } - + /* refill gain buffer with the fade in */ - + _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit); - + } else { - + /* no explicit inverse fade in, so just use (1 - fade * in) for the fade out of lower layers */ - + _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit); - + for (framecnt_t n = 0; n < fade_in_limit; ++n) { buf[n] *= 1 - gain_buffer[n]; } @@ -656,27 +654,27 @@ AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer, if (opaque()) { if (_inverse_fade_out) { - + _inverse_fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit); - + /* Fade the data from lower levels in */ for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) { buf[m] *= gain_buffer[n]; } - + /* fetch the actual fade out */ _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit); - + } else { /* no explicit inverse fade out (which is * actually a fade in), so just use (1 - fade * out) for the fade in of lower layers */ - + _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit); - + for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) { buf[m] *= 1 - gain_buffer[n]; } @@ -692,7 +690,7 @@ AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer, buf[m] += mixdown_buffer[m] * gain_buffer[n]; } } - + /* MIX OR COPY THE REGION BODY FROM mixdown_buffer INTO buf */ framecnt_t const N = to_read - fade_in_limit - fade_out_limit; @@ -733,7 +731,7 @@ AudioRegion::read_from_sources (SourceList const & srcs, framecnt_t limit, Sampl if (to_read == 0) { return 0; } - + if (chan_n < n_channels()) { boost::shared_ptr src = boost::dynamic_pointer_cast (srcs[chan_n]); @@ -759,7 +757,7 @@ AudioRegion::read_from_sources (SourceList const & srcs, framecnt_t limit, Sampl } } else { - + /* use silence */ memset (buf, 0, sizeof (Sample) * to_read); } @@ -921,7 +919,7 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_ 
_fade_out->set_state (*grandchild, version); } } - + if ((prop = child->property ("active")) != 0) { if (string_is_affirmative (prop->value())) { set_fade_out_active (true); @@ -929,7 +927,7 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_ set_fade_out_active (false); } } - + } else if ( (child->name() == "InverseFadeIn") || (child->name() == "InvFadeIn") ) { XMLNode* grandchild = child->child ("AutomationList"); if (grandchild) { @@ -1063,7 +1061,7 @@ AudioRegion::set_fade_in (FadeShape shape, framecnt_t len) _fade_in->fast_simple_add (len, GAIN_COEFF_UNITY); reverse_curve (_inverse_fade_in.val(), _fade_in.val()); break; - + case FadeSymmetric: //start with a nearly linear cuve _fade_in->fast_simple_add (0, 1); @@ -1119,13 +1117,13 @@ AudioRegion::set_fade_out (FadeShape shape, framecnt_t len) _fade_out->fast_simple_add (len, GAIN_COEFF_SMALL); reverse_curve (_inverse_fade_out.val(), _fade_out.val()); break; - - case FadeFast: + + case FadeFast: generate_db_fade (_fade_out.val(), len, num_steps, -60); generate_inverse_power_curve (_inverse_fade_out.val(), _fade_out.val()); break; - - case FadeSlow: + + case FadeSlow: generate_db_fade (c1, len, num_steps, -1); //start off with a slow fade generate_db_fade (c2, len, num_steps, -80); //end with a fast fade merge_curves (_fade_out.val(), c1, c2); @@ -1143,7 +1141,7 @@ AudioRegion::set_fade_out (FadeShape shape, framecnt_t len) _fade_out->fast_simple_add (len, GAIN_COEFF_SMALL); reverse_curve (_inverse_fade_out.val(), _fade_out.val()); break; - + case FadeSymmetric: //start with a nearly linear cuve _fade_out->fast_simple_add (0, 1); @@ -1173,7 +1171,7 @@ AudioRegion::set_fade_in_length (framecnt_t len) if (len > _length) { len = _length - 1; } - + if (len < 64) { len = 64; } @@ -1204,7 +1202,7 @@ AudioRegion::set_fade_out_length (framecnt_t len) bool changed = _fade_out->extend_to (len); if (changed) { - + if (_inverse_fade_out) { _inverse_fade_out->extend_to (len); } @@ -1573,10 +1571,10 @@ AudioRegion::get_related_audio_file_channel_count () const { uint32_t chan_count = 0; for (SourceList::const_iterator i = _sources.begin(); i != _sources.end(); ++i) { - + boost::shared_ptr sndf = boost::dynamic_pointer_cast(*i); if (sndf ) { - + if (sndf->channel_count() > chan_count) { chan_count = sndf->channel_count(); } @@ -1592,7 +1590,7 @@ AudioRegion::get_related_audio_file_channel_count () const } #endif // HAVE_COREAUDIO } - + return chan_count; } @@ -1805,88 +1803,63 @@ AudioRegion::find_silence (Sample threshold, framecnt_t min_length, framecnt_t f boost::scoped_array loudest (new Sample[block_size]); boost::scoped_array buf (new Sample[block_size]); + assert (fade_length >= 0); + assert (min_length > 0); + framepos_t pos = _start; - framepos_t const end = _start + _length - 1; + framepos_t const end = _start + _length; AudioIntervalResult silent_periods; - bool in_silence = false; - frameoffset_t silence_start = 0; - frameoffset_t silence_end = 0; + bool in_silence = true; + frameoffset_t silence_start = _start; - framecnt_t continuous_signal = fade_length; - framecnt_t hold_off = 0; while (pos < end && !itt.cancel) { + framecnt_t cur_samples = 0; + framecnt_t const to_read = min (end - pos, block_size); /* fill `loudest' with the loudest absolute sample at each instant, across all channels */ memset (loudest.get(), 0, sizeof (Sample) * block_size); + for (uint32_t n = 0; n < n_channels(); ++n) { - read_raw_internal (buf.get(), pos, block_size, n); - for (framecnt_t i = 0; i < block_size; ++i) { + 
cur_samples = read_raw_internal (buf.get(), pos, to_read, n); + for (framecnt_t i = 0; i < cur_samples; ++i) { loudest[i] = max (loudest[i], abs (buf[i])); } } /* now look for silence */ - for (framecnt_t i = 0; i < block_size; ++i) { + for (framecnt_t i = 0; i < cur_samples; ++i) { bool const silence = abs (loudest[i]) < threshold; if (silence && !in_silence) { /* non-silence to silence */ in_silence = true; - /* process last queued silent part if any */ - if (hold_off > 0) { - assert (hold_off < fade_length); - silence_end -= hold_off; - if (silence_end - silence_start >= min_length) { - silent_periods.push_back (std::make_pair (silence_start, silence_end)); - } - } - hold_off = 0; - - if (continuous_signal < fade_length) { - silence_start = pos + i + fade_length - continuous_signal; - } else { - silence_start = pos + i; - } + silence_start = pos + i + fade_length; } else if (!silence && in_silence) { /* silence to non-silence */ in_silence = false; - hold_off = 0; - if (pos + i - 1 - silence_start >= min_length) { - /* queue silence */ - silence_end = pos + i - 1; - hold_off = 1; - } - } + frameoffset_t silence_end = pos + i - 1 - fade_length; - if (hold_off > 0) { - assert (!in_silence); - if (++hold_off >= fade_length) { + if (silence_end - silence_start >= min_length) { silent_periods.push_back (std::make_pair (silence_start, silence_end)); - hold_off = 0; } } - - if (!silence) { - ++continuous_signal; - } else { - continuous_signal = 0; - } } - pos += block_size; - itt.progress = (end-pos)/(double)_length; + pos += cur_samples; + itt.progress = (end - pos) / (double)_length; + + if (cur_samples == 0) { + assert (pos >= end); + break; + } } - if (in_silence) { + if (in_silence && !itt.cancel) { /* last block was silent, so finish off the last period */ - assert (hold_off == 0); - if (continuous_signal < fade_length) { - silence_start += fade_length - continuous_signal; - } - if (end - 1 - silence_start >= min_length) { - silent_periods.push_back (std::make_pair (silence_start, end)); + if (end - 1 - silence_start >= min_length + fade_length) { + silent_periods.push_back (std::make_pair (silence_start, end - 1)); } } @@ -1920,7 +1893,7 @@ AudioRegion::get_single_other_xfade_region (bool start) const } else { rl = pl->regions_at (last_frame()); } - + RegionList::iterator i; boost::shared_ptr other; uint32_t n = 0; @@ -1973,6 +1946,6 @@ AudioRegion::verify_xfade_bounds (framecnt_t len, bool start) } return min (length(), min (maxlen, len)); - + }
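
Two notes on the substantive (non-whitespace) changes in this diff, each with a small standalone C++ sketch. Neither sketch is Ardour code; constants, helper re-implementations and test data are invented for illustration.

The first hunk replaces a quadratic nested loop in generate_db_fade() with a single running multiply: after i iterations the gain coefficient is fade_speed^i either way, so carrying `coeff` across iterations yields the same curve with one multiplication per step. The sketch below checks that equivalence; dB_to_coefficient() is re-implemented locally (Ardour's own helper lives in pbd/dB.h) and GAIN_COEFF_UNITY is written out as 1.0f. The -60 dB drop matches what the FadeFast case passes for the fade-out; num_steps here is just an example value.

// fade_sketch.cc -- standalone illustration only, not part of ardour.git.
// Shows that the rewritten generate_db_fade() loop (running multiply)
// produces the same coefficients as the old nested-loop version.
#include <cassert>
#include <cmath>
#include <cstdio>

// Local stand-in for Ardour's dB_to_coefficient() (pbd/dB.h).
static float dB_to_coefficient (float dB)
{
	return dB > -318.8f ? std::pow (10.0f, dB * 0.05f) : 0.0f;
}

int main ()
{
	const int   num_steps  = 10;      // example value, not Ardour's
	const float dB_drop    = -60.0f;  // same drop the FadeFast fade-out uses
	const float fade_speed = dB_to_coefficient (dB_drop / (float) num_steps);

	float coeff = 1.0f;               // GAIN_COEFF_UNITY

	for (int i = 1; i < (num_steps - 1); i++) {
		/* new code path: one multiply per step */
		coeff *= fade_speed;

		/* old code path: rebuild the product from scratch every time */
		float old_coeff = 1.0f;
		for (int j = 0; j < i; j++) {
			old_coeff *= fade_speed;
		}

		assert (std::fabs (coeff - old_coeff) < 1e-6f);
		std::printf ("step %2d: gain %.6f (%6.2f dB)\n", i, coeff, 20.0f * std::log10 (coeff));
	}
	return 0;
}

The largest change is the rewrite of AudioRegion::find_silence(): it now assumes silence until signal is seen, handles short reads from read_raw_internal(), and pads each detected silent stretch inward by fade_length samples at both ends, keeping the stretch only if what remains is at least min_length samples. The second sketch mirrors that interval bookkeeping on a plain std::vector of samples; the Interval typedef, buffer contents and parameter values are all invented for the example.

// silence_sketch.cc -- standalone illustration only, not part of ardour.git.
// Mirrors the interval logic of the rewritten AudioRegion::find_silence().
#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

typedef std::pair<long, long> Interval;

static std::vector<Interval>
find_silence (const std::vector<float>& buf, float threshold, long min_length, long fade_length)
{
	std::vector<Interval> silent;
	bool in_silence = true;   // like the new code: assume silence until signal appears
	long silence_start = 0;

	for (long i = 0; i < (long) buf.size(); ++i) {
		const bool silence = std::fabs (buf[i]) < threshold;

		if (silence && !in_silence) {
			/* signal -> silence: pad the start by fade_length */
			in_silence = true;
			silence_start = i + fade_length;
		} else if (!silence && in_silence) {
			/* silence -> signal: pad the end by fade_length, keep if long enough */
			in_silence = false;
			const long silence_end = i - 1 - fade_length;
			if (silence_end - silence_start >= min_length) {
				silent.push_back (std::make_pair (silence_start, silence_end));
			}
		}
	}

	if (in_silence) {
		/* ran off the end while still silent: close the last period */
		const long end = (long) buf.size();
		if (end - 1 - silence_start >= min_length + fade_length) {
			silent.push_back (std::make_pair (silence_start, end - 1));
		}
	}
	return silent;
}

int main ()
{
	std::vector<float> buf (1000, 0.0f);
	for (long i = 200; i < 400; ++i) {
		buf[i] = 0.5f;            // a burst of signal in the middle
	}
	const std::vector<Interval> silent = find_silence (buf, 0.001f, 100, 64);
	for (size_t n = 0; n < silent.size(); ++n) {
		std::printf ("silence: %ld .. %ld\n", silent[n].first, silent[n].second);
	}
	return 0;
}

Built with a plain `g++`, the second sketch should report two silent ranges, 0..135 and 464..999: each one pulled in by the 64-sample fade_length so that trimming to the reported bounds would not eat into material that still needs a fade.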