X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=libs%2Fardour%2Faudioregion.cc;h=0a33119ecf29b42e315084a9f821e1db83de2b3f;hb=b23d7d9170a12cbcd788e1c2cbcf688f376752ed;hp=ddd152bf0e89413dbfc8f9010d9e218752dc99fd;hpb=1c83df80db4ccea14a9c0d0722cb2393c502dd6e;p=ardour.git

diff --git a/libs/ardour/audioregion.cc b/libs/ardour/audioregion.cc
index ddd152bf0e..0a33119ecf 100644
--- a/libs/ardour/audioregion.cc
+++ b/libs/ardour/audioregion.cc
@@ -55,7 +55,7 @@
 #include "ardour/coreaudiosource.h"
 #endif // HAVE_COREAUDIO
 
-#include "i18n.h"
+#include "pbd/i18n.h"
 #include 
 
 using namespace std;
@@ -213,7 +213,7 @@ AudioRegion::register_properties ()
     , _fade_in (Properties::fade_in, boost::shared_ptr<AutomationList> (new AutomationList (*other->_fade_in.val()))) \
     , _inverse_fade_in (Properties::fade_in, boost::shared_ptr<AutomationList> (new AutomationList (*other->_inverse_fade_in.val()))) \
     , _fade_out (Properties::fade_in, boost::shared_ptr<AutomationList> (new AutomationList (*other->_fade_out.val()))) \
-    , _inverse_fade_out (Properties::fade_in, boost::shared_ptr<AutomationList> (new AutomationList (*other->_inverse_fade_out.val())))
+    , _inverse_fade_out (Properties::fade_in, boost::shared_ptr<AutomationList> (new AutomationList (*other->_inverse_fade_out.val()))) \
 /* a Session will reset these to its chosen defaults by calling AudioRegion::set_default_fade() */
 
 void
@@ -279,13 +279,13 @@ AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other)
     assert (_sources.size() == _master_sources.size());
 }
 
-AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other, framecnt_t offset)
+AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other, MusicFrame offset)
     : Region (other, offset)
     , AUDIOREGION_COPY_STATE (other)
       /* As far as I can see, the _envelope's times are relative to region position, and have nothing to
          do with sources (and hence _start). So when we copy the envelope, we just use the supplied offset.
      */
-    , _envelope (Properties::envelope, boost::shared_ptr<AutomationList> (new AutomationList (*other->_envelope.val(), offset, other->_length)))
+    , _envelope (Properties::envelope, boost::shared_ptr<AutomationList> (new AutomationList (*other->_envelope.val(), offset.frame, other->_length)))
     , _automatable (other->session())
     , _fade_in_suspended (0)
     , _fade_out_suspended (0)
@@ -372,7 +372,7 @@ void
 AudioRegion::connect_to_analysis_changed ()
 {
     for (SourceList::const_iterator i = _sources.begin(); i != _sources.end(); ++i) {
-        (*i)->AnalysisChanged.connect_same_thread (*this, boost::bind (&AudioRegion::invalidate_transients, this));
+        (*i)->AnalysisChanged.connect_same_thread (*this, boost::bind (&AudioRegion::maybe_invalidate_transients, this));
     }
 }
 
@@ -770,11 +768,9 @@ XMLNode&
 AudioRegion::get_basic_state ()
 {
     XMLNode& node (Region::state ());
-    char buf[64];
-    LocaleGuard lg (X_("C"));
+    LocaleGuard lg;
 
-    snprintf (buf, sizeof (buf), "%u", (uint32_t) _sources.size());
-    node.add_property ("channels", buf);
+    node.set_property ("channels", (uint32_t)_sources.size());
 
     return node;
 }
@@ -784,7 +782,7 @@ AudioRegion::state ()
 {
     XMLNode& node (get_basic_state());
     XMLNode *child;
-    LocaleGuard lg (X_("C"));
+    LocaleGuard lg;
 
     child = node.add_child ("Envelope");
 
@@ -802,7 +800,7 @@ AudioRegion::state ()
     }
 
     if (default_env) {
-        child->add_property ("default", "yes");
+        child->set_property ("default", "yes");
     } else {
         child->add_child_nocopy (_envelope->get_state ());
     }
@@ -810,7 +808,7 @@ AudioRegion::state ()
     child = node.add_child (X_("FadeIn"));
 
     if (_default_fade_in) {
-        child->add_property ("default", "yes");
+        child->set_property ("default", "yes");
     } else {
         child->add_child_nocopy (_fade_in->get_state ());
     }
@@ -823,7 +821,7 @@ AudioRegion::state ()
     child = node.add_child (X_("FadeOut"));
 
     if (_default_fade_out) {
-        child->add_property ("default", "yes");
+        child->set_property ("default", "yes");
     } else {
         child->add_child_nocopy (_fade_out->get_state ());
     }
@@ -840,8 +838,7 @@ int
 AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_changed, bool send)
 {
     const XMLNodeList& nlist = node.children();
-    const XMLProperty *prop;
-    LocaleGuard lg (X_("C"));
+    LocaleGuard lg;
     boost::shared_ptr<Playlist> the_playlist (_playlist.lock());
 
     suspend_property_changes ();
@@ -857,10 +854,10 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_
 
     Region::_set_state (node, version, what_changed, false);
 
-    if ((prop = node.property ("scale-gain")) != 0) {
-        float a = atof (prop->value().c_str());
-        if (a != _scale_amplitude) {
-            _scale_amplitude = a;
+    float val;
+    if (node.get_property ("scale-gain", val)) {
+        if (val != _scale_amplitude) {
+            _scale_amplitude = val;
             what_changed.add (Properties::scale_amplitude);
         }
     }
@@ -871,7 +868,7 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_
     for (XMLNodeConstIterator niter = nlist.begin(); niter != nlist.end(); ++niter) {
 
         XMLNode *child;
-        XMLProperty *prop;
+        XMLProperty const * prop;
 
         child = (*niter);
 
@@ -890,7 +887,8 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_
 
             _fade_in->clear ();
 
-            if (((prop = child->property ("default")) != 0 && string_is_affirmative (prop->value())) || (prop = child->property ("steepness")) != 0) {
+            bool is_default;
+            if ((child->get_property ("default", is_default) && is_default) || (prop = child->property ("steepness")) != 0) {
                 set_default_fade_in ();
             } else {
                 XMLNode* grandchild = child->child ("AutomationList");
@@ -899,19 +897,17 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_
                 }
             }
 
-            if ((prop = child->property ("active")) != 0) {
-                if (string_is_affirmative (prop->value())) {
-                    set_fade_in_active (true);
-                } else {
-                    set_fade_in_active (false);
-                }
+            bool is_active;
+            if (child->get_property ("active", is_active)) {
+                set_fade_in_active (is_active);
             }
 
         } else if (child->name() == "FadeOut") {
 
             _fade_out->clear ();
 
-            if (((prop = child->property ("default")) != 0 && (string_is_affirmative (prop->value()))) || (prop = child->property ("steepness")) != 0) {
+            bool is_default;
+            if ((child->get_property ("default", is_default) && is_default) || (prop = child->property ("steepness")) != 0) {
                 set_default_fade_out ();
             } else {
                 XMLNode* grandchild = child->child ("AutomationList");
@@ -920,12 +916,9 @@ AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_
                 }
             }
 
-            if ((prop = child->property ("active")) != 0) {
-                if (string_is_affirmative (prop->value())) {
-                    set_fade_out_active (true);
-                } else {
-                    set_fade_out_active (false);
-                }
+            bool is_active;
+            if (child->get_property ("active", is_active)) {
+                set_fade_out_active (is_active);
             }
 
         } else if ( (child->name() == "InverseFadeIn") || (child->name() == "InvFadeIn") ) {
@@ -1095,7 +1088,7 @@ AudioRegion::set_fade_out (boost::shared_ptr<AutomationList> f)
     _fade_out->thaw ();
     _default_fade_out = false;
 
-    send_change (PropertyChange (Properties::fade_in));
+    send_change (PropertyChange (Properties::fade_out));
 }
 
 void
@@ -1402,9 +1395,6 @@ AudioRegion::set_scale_amplitude (gain_t g)
     send_change (PropertyChange (Properties::scale_amplitude));
 }
 
-/** @return the maximum (linear) amplitude of the region, or a -ve
- *  number if the Progress object reports that the process was cancelled.
- */
 double
 AudioRegion::maximum_amplitude (Progress* p) const
 {
@@ -1444,6 +1434,45 @@ AudioRegion::maximum_amplitude (Progress* p) const
     return maxamp;
 }
+double
+AudioRegion::rms (Progress* p) const
+{
+    framepos_t fpos = _start;
+    framepos_t const fend = _start + _length;
+    uint32_t const n_chan = n_channels ();
+    double rms = 0;
+
+    framecnt_t const blocksize = 64 * 1024;
+    Sample buf[blocksize];
+
+    framecnt_t total = 0;
+
+    if (n_chan == 0 || fend == fpos) {
+        return 0;
+    }
+
+    while (fpos < fend) {
+        framecnt_t const to_read = min (fend - fpos, blocksize);
+        for (uint32_t c = 0; c < n_chan; ++c) {
+            if (read_raw_internal (buf, fpos, to_read, c) != to_read) {
+                return 0;
+            }
+            for (framepos_t i = 0; i < to_read; ++i) {
+                rms += buf[i] * buf[i];
+            }
+        }
+        total += to_read;
+        fpos += to_read;
+        if (p) {
+            p->set_progress (float (fpos - _start) / _length);
+            if (p->cancelled ()) {
+                return -1;
+            }
+        }
+    }
+    return sqrt (2. * rms / (double)(total * n_chan));
+}
+
 
 /** Normalize using a given maximum amplitude and target, so that region
  *  _scale_amplitude becomes target / max_amplitude.
 */
@@ -1594,93 +1623,136 @@ AudioRegion::get_related_audio_file_channel_count () const
     return chan_count;
 }
 
-int
-AudioRegion::adjust_transients (frameoffset_t delta)
+void
+AudioRegion::clear_transients () // yet unused
 {
-    for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) {
-        (*x) = (*x) + delta;
-    }
-
+    _user_transients.clear ();
+    _valid_transients = false;
     send_change (PropertyChange (Properties::valid_transients));
-
-    return 0;
 }
 
-int
-AudioRegion::update_transient (framepos_t old_position, framepos_t new_position)
+void
+AudioRegion::add_transient (framepos_t where)
 {
-    for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) {
-        if ((*x) == old_position) {
-            (*x) = new_position;
-            send_change (PropertyChange (Properties::valid_transients));
+    if (where < first_frame () || where >= last_frame ()) {
+        return;
+    }
+    where -= _position;
 
-            break;
+    if (!_valid_transients) {
+        _transient_user_start = _start;
+        _valid_transients = true;
+    }
+    frameoffset_t offset = _transient_user_start - _start;
+
+    if (where < offset) {
+        if (offset <= 0) {
+            return;
         }
+        // region start changed (extend to front), shift points and offset
+        for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) {
+            (*x) += offset;
+        }
+        _transient_user_start -= offset;
+        offset = 0;
     }
 
-    return 0;
+    const framepos_t p = where - offset;
+    _user_transients.push_back(p);
+    send_change (PropertyChange (Properties::valid_transients));
 }
 
 void
-AudioRegion::add_transient (framepos_t where)
+AudioRegion::update_transient (framepos_t old_position, framepos_t new_position)
 {
-    _transients.push_back(where);
-    _valid_transients = true;
+    bool changed = false;
+    if (!_onsets.empty ()) {
+        const framepos_t p = old_position - _position;
+        AnalysisFeatureList::iterator x = std::find (_onsets.begin (), _onsets.end (), p);
+        if (x != _transients.end ()) {
+            (*x) = new_position - _position;
+            changed = true;
+        }
+    }
 
-    send_change (PropertyChange (Properties::valid_transients));
+    if (_valid_transients) {
+        const frameoffset_t offset = _position + _transient_user_start - _start;
+        const framepos_t p = old_position - offset;
+        AnalysisFeatureList::iterator x = std::find (_user_transients.begin (), _user_transients.end (), p);
+        if (x != _transients.end ()) {
+            (*x) = new_position - offset;
+            changed = true;
+        }
+    }
+
+    if (changed) {
+        send_change (PropertyChange (Properties::valid_transients));
+    }
 }
 
 void
 AudioRegion::remove_transient (framepos_t where)
 {
-    _transients.remove(where);
-    _valid_transients = true;
+    bool changed = false;
+    if (!_onsets.empty ()) {
+        const framepos_t p = where - _position;
+        AnalysisFeatureList::iterator i = std::find (_onsets.begin (), _onsets.end (), p);
+        if (i != _transients.end ()) {
+            _onsets.erase (i);
+            changed = true;
+        }
+    }
 
-    send_change (PropertyChange (Properties::valid_transients));
+    if (_valid_transients) {
+        const framepos_t p = where - (_position + _transient_user_start - _start);
+        AnalysisFeatureList::iterator i = std::find (_user_transients.begin (), _user_transients.end (), p);
+        if (i != _transients.end ()) {
+            _transients.erase (i);
+            changed = true;
+        }
+    }
+
+    if (changed) {
+        send_change (PropertyChange (Properties::valid_transients));
+    }
 }
 
-int
-AudioRegion::set_transients (AnalysisFeatureList& results)
+void
+AudioRegion::set_onsets (AnalysisFeatureList& results)
 {
-    _transients.clear();
-    _transients = results;
-    _valid_transients = true;
-
+    _onsets.clear();
+    _onsets = results;
     send_change (PropertyChange (Properties::valid_transients));
-
-    return 0;
 }
 
-int
-AudioRegion::get_transients (AnalysisFeatureList& results, bool force_new)
+void
+AudioRegion::build_transients ()
 {
+    _transients.clear ();
+    _transient_analysis_start = _transient_analysis_end = 0;
+
     boost::shared_ptr<Playlist> pl = playlist();
 
     if (!pl) {
-        return -1;
-    }
-
-    if (_valid_transients && !force_new) {
-        results = _transients;
-        return 0;
+        return;
     }
 
+    /* check analyzed sources first */
     SourceList::iterator s;
-
     for (s = _sources.begin() ; s != _sources.end(); ++s) {
         if (!(*s)->has_been_analysed()) {
+#ifndef NDEBUG
             cerr << "For " << name() << " source " << (*s)->name() << " has not been analyzed\n";
+#endif
             break;
         }
     }
 
     if (s == _sources.end()) {
         /* all sources are analyzed, merge data from each one */
-
         for (s = _sources.begin() ; s != _sources.end(); ++s) {
 
             /* find the set of transients within the bounds of this region */
-
             AnalysisFeatureList::iterator low = lower_bound ((*s)->transients.begin(),
                                                              (*s)->transients.end(),
                                                              _start);
@@ -1690,23 +1762,19 @@ AudioRegion::get_transients (AnalysisFeatureList& results, bool force_new)
                                                               _start + _length);
 
             /* and add them */
-
-            results.insert (results.end(), low, high);
+            _transients.insert (_transients.end(), low, high);
         }
 
-        TransientDetector::cleanup_transients (results, pl->session().frame_rate(), 3.0);
+        TransientDetector::cleanup_transients (_transients, pl->session().frame_rate(), 3.0);
 
         /* translate all transients to current position */
-
-        for (AnalysisFeatureList::iterator x = results.begin(); x != results.end(); ++x) {
+        for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) {
            (*x) -= _start;
-           (*x) += _position;
        }
 
-        _transients = results;
-        _valid_transients = true;
-
-        return 0;
+        _transient_analysis_start = _start;
+        _transient_analysis_end = _start + _length;
+        return;
     }
 
     /* no existing/complete transient info */
@@ -1721,7 +1789,7 @@ You currently have \"auto-analyse-audio\" disabled, which means \
 that transient data must be generated every time it is required.\n\n\
 If you are doing work that will require transient data on a \
 regular basis, you should probably enable \"auto-analyse-audio\" \
-then quit %1 and restart.\n\n\
+in Preferences > Audio > Regions, then quit %1 and restart.\n\n\
 This dialog will not display again. But you may notice a slight delay \
 in this and future transient-detection operations.\n\
 "), PROGRAM_NAME));
@@ -1729,64 +1797,74 @@ in this and future transient-detection operations.\n\
         }
     }
 
-    bool existing_results = !results.empty();
-
     try {
 
         TransientDetector t (pl->session().frame_rate());
 
-        _transients.clear ();
-        _valid_transients = false;
-
        for (uint32_t i = 0; i < n_channels(); ++i) {
 
             AnalysisFeatureList these_results;
 
             t.reset ();
 
+            /* this produces analysis result relative to current position
+             * ::read() sample 0 is at _position */
             if (t.run ("", this, i, these_results)) {
-                return -1;
-            }
-
-            /* translate all transients to give absolute position */
-
-            for (AnalysisFeatureList::iterator i = these_results.begin(); i != these_results.end(); ++i) {
-                (*i) += _position;
+                return;
             }
 
             /* merge */
-
             _transients.insert (_transients.end(), these_results.begin(), these_results.end());
         }
     } catch (...) {
         error << string_compose(_("Transient Analysis failed for %1."), _("Audio Region")) << endmsg;
-        return -1;
+        return;
     }
 
-    if (!results.empty()) {
-        if (existing_results) {
-
-            /* merge our transients into the existing ones, then clean up
-               those.
-            */
-
-            results.insert (results.end(), _transients.begin(), _transients.end());
-            TransientDetector::cleanup_transients (results, pl->session().frame_rate(), 3.0);
-        }
-
-        /* make sure ours are clean too */
+    TransientDetector::cleanup_transients (_transients, pl->session().frame_rate(), 3.0);
+    _transient_analysis_start = _start;
+    _transient_analysis_end = _start + _length;
+}
 
-        TransientDetector::cleanup_transients (_transients, pl->session().frame_rate(), 3.0);
+/* Transient analysis uses ::read() which is relative to _start,
+ * at the time of analysis and spans _length samples.
+ *
+ * This is true for RhythmFerret::run_analysis and the
+ * TransientDetector here.
+ *
+ * We store _start and length in _transient_analysis_start,
+ * _transient_analysis_end in case the region is trimmed or split after analysis.
+ *
+ * Various methods (most notably Playlist::find_next_transient and
+ * RhythmFerret::do_split_action) span multiple regions and *merge/combine*
+ * Analysis results.
+ * We therefore need to translate the analysis timestamps to absolute session-time
+ * and include the _position of the region.
+ *
+ * Note: we should special case the AudioRegionView. The region-view itself
+ * is located at _position (currently ARV subtracts _position again)
+ */
+void
+AudioRegion::get_transients (AnalysisFeatureList& results)
+{
+    boost::shared_ptr<Playlist> pl = playlist();
+    if (!playlist ()) {
+        return;
+    }
 
+    Region::merge_features (results, _user_transients, _position + _transient_user_start - _start);
 
-        results = _transients;
+    if (!_onsets.empty ()) {
+        // onsets are invalidated when start or length changes
+        merge_features (results, _onsets, _position);
+        return;
     }
 
-    _valid_transients = true;
+    if ((_transient_analysis_start == _transient_analysis_end)
+        || _transient_analysis_start > _start
+        || _transient_analysis_end < _start + _length) {
+        build_transients ();
+    }
 
-    return 0;
+    merge_features (results, _transients, _position + _transient_analysis_start - _start);
 }
 
 /** Find areas of `silence' within a region.
@@ -1817,11 +1895,13 @@ AudioRegion::find_silence (Sample threshold, framecnt_t min_length, framecnt_t f
     while (pos < end && !itt.cancel) {
 
         framecnt_t cur_samples = 0;
+        framecnt_t const to_read = min (end - pos, block_size);
         /* fill `loudest' with the loudest absolute sample at each instant, across all channels */
         memset (loudest.get(), 0, sizeof (Sample) * block_size);
+
         for (uint32_t n = 0; n < n_channels(); ++n) {
-            cur_samples = read_raw_internal (buf.get(), pos, block_size, n);
+            cur_samples = read_raw_internal (buf.get(), pos, to_read, n);
             for (framecnt_t i = 0; i < cur_samples; ++i) {
                 loudest[i] = max (loudest[i], abs (buf[i]));
            }
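
Note on the new AudioRegion::rms (Progress*) added above: it sums squared samples over every channel, and the final sqrt (2. * rms / (total * n_chan)) scales the mean square by 2, so a full-scale sine wave reports 1.0 rather than 1/sqrt(2) (the value is referenced to the peak of an equal-power sine). The standalone sketch below mirrors only that arithmetic; region_rms, the vector-of-buffers layout and the test signal are illustrative assumptions, not part of this patch or of Ardour's API.

// rms_sketch.cc -- illustrative only; mirrors the accumulation used by AudioRegion::rms() above.
#include <cmath>
#include <cstdio>
#include <vector>

typedef float Sample;

// Hypothetical helper: one equal-length buffer per channel.
static double
region_rms (const std::vector<std::vector<Sample> >& chans)
{
    if (chans.empty () || chans[0].empty ()) {
        return 0;
    }
    double sum = 0;
    for (size_t c = 0; c < chans.size (); ++c) {
        for (size_t i = 0; i < chans[c].size (); ++i) {
            sum += (double) chans[c][i] * chans[c][i];
        }
    }
    // Factor 2: report relative to a full-scale sine (peak 1.0 -> result 1.0).
    return sqrt (2. * sum / (double) (chans[0].size () * chans.size ()));
}

int
main ()
{
    // One channel holding a full-scale 1 kHz sine at 48 kHz; expect a value very close to 1.0.
    const double two_pi = 6.283185307179586;
    std::vector<std::vector<Sample> > chans (1, std::vector<Sample> (48000));
    for (size_t i = 0; i < chans[0].size (); ++i) {
        chans[0][i] = (Sample) std::sin (two_pi * 1000.0 * (double) i / 48000.0);
    }
    printf ("sine-referenced rms = %.4f\n", region_rms (chans));
    return 0;
}

The same session-time bookkeeping applies to the reworked transient code: assuming Region::merge_features() adds the supplied offset to every feature, a stored transient t maps to session frame t + _position + _transient_analysis_start - _start. For example, with _position = 100000 and _transient_analysis_start = 4000, a later trim to _start = 5000 puts a transient stored at 1500 on session frame 1500 + 100000 + 4000 - 5000 = 100500, i.e. still on the same audio sample even though the region edge moved.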