#include "evoral/EventSink.hpp"
#include "ardour/debug.h"
+#include "ardour/file_source.h"
+#include "ardour/midi_channel_filter.h"
#include "ardour/midi_model.h"
-#include "ardour/midi_state_tracker.h"
#include "ardour/midi_source.h"
-#include "ardour/file_source.h"
+#include "ardour/midi_state_tracker.h"
#include "ardour/session.h"
#include "ardour/session_directory.h"
#include "ardour/source_factory.h"
framepos_t start,
framecnt_t cnt,
MidiStateTracker* tracker,
+ MidiChannelFilter* filter,
const std::set<Evoral::Parameter>& filtered) const
{
BeatsFramesConverter converter(_session.tempo_map(), source_start);
Evoral::Sequence<Evoral::Beats>::const_iterator& i = _model_iter;
const bool linear_read = _last_read_end != 0 && start == _last_read_end;
if (!linear_read || !_model_iter_valid) {
+#if 0
// Cached iterator is invalid, search for the first event past start
i = _model->begin(converter.from(start), false, filtered,
linear_read ? &_model->active_notes() : NULL);
if (!linear_read) {
_model->active_notes().clear();
}
+#else
+ /* hot-fix http://tracker.ardour.org/view.php?id=6541
+ * "parallel playback of linked midi regions -> no note-offs"
+ *
+ * A midi source can be used by multiple tracks simultaneously,
+ * in which case midi_read() may be called from different tracks for
+ * overlapping time-ranges.
+ *
+ * However, there is only a single iterator for a given midi-source.
+ * This results in every midi_read() performing a seek.
+ *
+ * If seeking is performed with
+ * _model->begin(converter.from(start),...)
+ * the model is used for seeking. That method seeks to the first
+ * *note-on* event after 'start'.
+ *
+ * _model->begin(converter.from( ) ,..) eventually calls
+ * Sequence<Time>::const_iterator() in libs/evoral/src/Sequence.cpp
+ * which looks up the note-event via seq.note_lower_bound(t);
+ * but the sequence 'seq' only contains note-on events(!).
+ * note-off events are implicit in Sequence<Time>::operator++()
+ * via _active_notes.pop(); and not part of seq.
+ *
+ * see also http://tracker.ardour.org/view.php?id=6287#c16671
+ *
+ * The linear search below ensures that reading starts at the first
+ * event for the given time, regardless of its event type.
+ *
+ * The performance of this approach is O(N), while the previous
+ * implementation is O(log(N)). This needs to be optimized:
+ * The model-iterator or event-sequence needs to be re-designed in
+ * some way (maybe keep an iterator per playlist).
+ */
+ for (i = _model->begin(); i != _model->end(); ++i) {
+ const framecnt_t time_frames = converter.to(i->time());
+ if (time_frames >= start) {
+ break;
+ }
+ }
+ _model_iter_valid = true;
+#endif
}
_last_read_end = start + cnt;
for (; i != _model->end(); ++i) {
const framecnt_t time_frames = converter.to(i->time());
if (time_frames < start + cnt) {
+ if (filter && filter->filter(i->buffer(), i->size())) {
+ DEBUG_TRACE (DEBUG::MidiSourceIO,
+ string_compose ("%1: filter event @ %2 type %3 size %4\n",
+ _name, time_frames + source_start, i->event_type(), i->size()));
+ continue;
+ }
+
// Offset by source start to convert event time to session time
dst.write (time_frames + source_start, i->event_type(), i->size(), i->buffer());
}
return cnt;
} else {
- return read_unlocked (lm, dst, source_start, start, cnt, tracker);
+ return read_unlocked (lm, dst, source_start, start, cnt, tracker, filter);
}
}