1 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
5 An audio time-stretching and pitch-shifting library.
6 Copyright 2007-2008 Chris Cannam.
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2 of the
11 License, or (at your option) any later version. See the file
12 COPYING included with this distribution for more information.
15 #include "StretcherImpl.h"
16 #include "PercussiveAudioCurve.h"
17 #include "HighFrequencyAudioCurve.h"
18 #include "ConstantAudioCurve.h"
19 #include "StretchCalculator.h"
20 #include "StretcherChannelData.h"
21 #include "Resampler.h"
35 namespace RubberBand {
// Worker-thread constructor for one channel: s is the owning stretcher
// implementation, c the channel index. The data-available condition gets a
// per-channel debug name ("data A", "data B", ...).
37 RubberBandStretcher::Impl::ProcessThread::ProcessThread(Impl *s, size_t c) :
40 m_dataAvailable(std::string("data ") + char('A' + c)),
// Per-channel worker loop: keep processing input chunks while more input is
// expected or unread data remains, then do one final drain pass on exit.
45 RubberBandStretcher::Impl::ProcessThread::run()
47 if (m_s->m_debugLevel > 1) {
48 cerr << "thread " << m_channel << " getting going" << endl;
51 ChannelData &cd = *m_s->m_channelData[m_channel];
// inputSize == -1 means the total input length is not yet known (caller is
// still feeding data), so we must keep looping even if the buffer is empty.
53 while (cd.inputSize == -1 ||
54 cd.inbuf->getReadSpace() > 0) {
56 // if (cd.inputSize != -1) {
57 // cerr << "inputSize == " << cd.inputSize
58 // << ", readSpace == " << cd.inbuf->getReadSpace() << endl;
61 bool any = false, last = false;
62 m_s->processChunks(m_channel, any, last);
// If we consumed anything, wake a producer possibly blocked on write space.
66 if (any) m_s->m_spaceAvailable.signal();
68 m_dataAvailable.lock();
69 if (!m_s->testInbufReadSpace(m_channel) && !m_abandoning) {
// Bounded wait so an abandon request cannot leave the thread blocked forever.
70 m_dataAvailable.wait(50000); // bounded in case of abandonment
72 m_dataAvailable.unlock();
76 if (m_s->m_debugLevel > 1) {
77 cerr << "thread " << m_channel << " abandoning" << endl;
// Final drain after the loop ends, then signal space one last time.
83 bool any = false, last = false;
84 m_s->processChunks(m_channel, any, last);
85 m_s->m_spaceAvailable.signal();
87 if (m_s->m_debugLevel > 1) {
88 cerr << "thread " << m_channel << " done" << endl;
// Wake the worker thread: new input data has been written for its channel.
93 RubberBandStretcher::Impl::ProcessThread::signalDataAvailable()
95 m_dataAvailable.signal();
// Ask the worker thread to give up; run() checks m_abandoning in its wait
// condition, so presumably this sets that flag and signals — body not
// visible here, confirm against the full source.
99 RubberBandStretcher::Impl::ProcessThread::abandon()
// Decide whether the pitch-shift resampling step runs before (true) or
// after (false) the time-stretching step. Only ever true in real-time mode.
105 RubberBandStretcher::Impl::resampleBeforeStretching() const
107 // We can't resample before stretching in offline mode, because
108 // the stretch calculation is based on doing it the other way
109 // around. It would take more work (and testing) to enable this.
110 if (!m_realtime) return false;
// The preferred ordering depends on pitch direction and the quality option:
112 if (m_options & OptionPitchHighQuality) {
113 return (m_pitchScale < 1.0); // better sound
114 } else if (m_options & OptionPitchHighConsistency) {
117 return (m_pitchScale > 1.0); // better performance
// Copy up to `samples` frames of caller input for channel c into the
// channel's input ring buffer, resampling first when
// resampleBeforeStretching() applies. Returns (in the full source) the
// number of input samples actually consumed; may be fewer than requested
// when the ring buffer lacks space.
122 RubberBandStretcher::Impl::consumeChannel(size_t c, const float *input,
123 size_t samples, bool final)
125 Profiler profiler("RubberBandStretcher::Impl::consumeChannel");
127 ChannelData &cd = *m_channelData[c];
128 RingBuffer<float> &inbuf = *cd.inbuf;
130 size_t toWrite = samples;
131 size_t writable = inbuf.getWriteSpace();
133 bool resampling = resampleBeforeStretching();
// Resampling path: the resampler changes the frame count by 1/m_pitchScale,
// so clamp `samples` back so its output fits the writable space.
137 toWrite = int(ceil(samples / m_pitchScale));
138 if (writable < toWrite) {
139 samples = int(floor(writable * m_pitchScale));
140 if (samples == 0) return 0;
143 size_t reqSize = int(ceil(samples / m_pitchScale));
// Growing the resample buffer here is a recoverable but unexpected case.
144 if (reqSize > cd.resamplebufSize) {
145 cerr << "WARNING: RubberBandStretcher::Impl::consumeChannel: resizing resampler buffer from "
146 << cd.resamplebufSize << " to " << reqSize << endl;
147 cd.setResampleBufSize(reqSize);
151 toWrite = cd.resampler->resample(&input,
159 if (writable < toWrite) {
167 inbuf.write(cd.resamplebuf, toWrite);
// inCount tracks original (pre-resample) input frames consumed.
168 cd.inCount += samples;
// Non-resampling path: write the caller's samples straight through.
171 inbuf.write(input, toWrite);
172 cd.inCount += toWrite;
// Drain-and-process loop for a single channel: consume every available
// chunk from its input buffer. `any` reports whether anything was
// processed; `last` whether the channel's final chunk was written.
178 RubberBandStretcher::Impl::processChunks(size_t c, bool &any, bool &last)
180 Profiler profiler("RubberBandStretcher::Impl::processChunks");
182 // Process as many chunks as there are available on the input
183 // buffer for channel c. This requires that the increments have
184 // already been calculated.
186 ChannelData &cd = *m_channelData[c];
193 if (!testInbufReadSpace(c)) {
194 // cerr << "not enough input" << endl;
// Peek a full analysis window but advance the read pointer only by the
// (smaller) analysis increment, so consecutive windows overlap.
201 size_t got = cd.inbuf->peek(cd.fltbuf, m_windowSize);
202 assert(got == m_windowSize || cd.inputSize >= 0);
203 cd.inbuf->skip(m_increment);
207 bool phaseReset = false;
208 size_t phaseIncrement, shiftIncrement;
// Increments were precalculated (non-RT path); getIncrements just looks
// them up for this channel's current chunk.
209 getIncrements(c, phaseIncrement, shiftIncrement, phaseReset);
211 last = processChunkForChannel(c, phaseIncrement, shiftIncrement, phaseReset);
213 if (m_debugLevel > 2) {
214 cerr << "channel " << c << ": last = " << last << ", chunkCount = " << cd.chunkCount << endl;
// Real-time-mode step: process exactly one chunk across all channels,
// computing the increments on the fly if they are not already known.
// Returns false (in the full source) if any channel lacks enough input.
220 RubberBandStretcher::Impl::processOneChunk()
222 Profiler profiler("RubberBandStretcher::Impl::processOneChunk");
224 // Process a single chunk for all channels, provided there is
225 // enough data on each channel for at least one chunk. This is
226 // able to calculate increments as it goes along.
228 for (size_t c = 0; c < m_channels; ++c) {
// All channels must have a chunk ready before we commit to processing any.
229 if (!testInbufReadSpace(c)) return false;
230 ChannelData &cd = *m_channelData[c];
232 size_t got = cd.inbuf->peek(cd.fltbuf, m_windowSize);
233 assert(got == m_windowSize || cd.inputSize >= 0);
234 cd.inbuf->skip(m_increment);
239 bool phaseReset = false;
240 size_t phaseIncrement, shiftIncrement;
// Channel 0 is the reference: if no precalculated increment exists for its
// current chunk, derive one now from the freshly analysed data.
241 if (!getIncrements(0, phaseIncrement, shiftIncrement, phaseReset)) {
242 calculateIncrements(phaseIncrement, shiftIncrement, phaseReset);
// The same increments are then applied to every channel for this chunk.
246 for (size_t c = 0; c < m_channels; ++c) {
247 last = processChunkForChannel(c, phaseIncrement, shiftIncrement, phaseReset);
248 m_channelData[c]->chunkCount++;
// Check whether channel c has enough buffered input to process a chunk.
// Also flips the channel into "draining" mode once input is known-complete
// and less than half a window remains.
255 RubberBandStretcher::Impl::testInbufReadSpace(size_t c)
257 Profiler profiler("RubberBandStretcher::Impl::testInbufReadSpace");
259 ChannelData &cd = *m_channelData[c];
260 RingBuffer<float> &inbuf = *cd.inbuf;
262 size_t rs = inbuf.getReadSpace();
264 if (rs < m_windowSize && !cd.draining) {
266 if (cd.inputSize == -1) {
268 // Not all the input data has been written to the inbuf
269 // (that's why the input size is not yet set). We can't
270 // process, because we don't have a full chunk of data, so
271 // our process chunk would contain some empty padding in
272 // its input -- and that would give incorrect output, as
273 // we know there is more input to come.
276 // cerr << "WARNING: RubberBandStretcher: read space < chunk size ("
277 // << inbuf.getReadSpace() << " < " << m_windowSize
278 // << ") when not all input written, on processChunks for channel " << c << endl;
// Input size is known here, so a short window means genuine end-of-data.
285 if (m_debugLevel > 1) {
286 cerr << "read space = 0, giving up" << endl;
290 } else if (rs < m_windowSize/2) {
292 if (m_debugLevel > 1) {
293 cerr << "read space = " << rs << ", setting draining true" << endl;
// Run the full analyse/modify/synthesise/write pipeline for one chunk on
// one channel, using the given phase and shift increments. Returns true
// when this was the channel's last chunk.
304 RubberBandStretcher::Impl::processChunkForChannel(size_t c,
305 size_t phaseIncrement,
306 size_t shiftIncrement,
309 Profiler profiler("RubberBandStretcher::Impl::processChunkForChannel");
311 // Process a single chunk on a single channel. This assumes
312 // enough input data is available; caller must have tested this
313 // using e.g. testInbufReadSpace first. Return true if this is
314 // the last chunk on the channel.
316 if (phaseReset && (m_debugLevel > 1)) {
317 cerr << "processChunkForChannel: phase reset found, incrs "
318 << phaseIncrement << ":" << shiftIncrement << endl;
321 ChannelData &cd = *m_channelData[c];
325 // This is the normal processing case -- draining is only
326 // set when all the input has been used and we only need
327 // to write from the existing accumulator into the output.
329 // We know we have enough samples available in m_inbuf --
330 // this is usually m_windowSize, but we know that if fewer
331 // are available, it's OK to use zeroes for the rest
332 // (which the ring buffer will provide) because we've
333 // reached the true end of the data.
335 // We need to peek m_windowSize samples for processing, and
336 // then skip m_increment to advance the read pointer.
338 modifyChunk(c, phaseIncrement, phaseReset);
339 synthesiseChunk(c); // reads from cd.mag, cd.phase
// Debug aid: stamp a recognisable sawtooth over the accumulator head so
// chunk boundaries are visible in the output (debug level > 2 only).
341 if (m_debugLevel > 2) {
343 for (int i = 0; i < 10; ++i) {
344 cd.accumulator[i] = 1.2f - (i % 3) * 1.2f;
// Draining path: no new input; flush what is left in the accumulator.
353 if (m_debugLevel > 1) {
354 cerr << "draining: accumulator fill = " << cd.accumulatorFill << " (shiftIncrement = " << shiftIncrement << ")" << endl;
356 if (shiftIncrement == 0) {
357 cerr << "WARNING: draining: shiftIncrement == 0, can't handle that in this context: setting to " << m_increment << endl;
358 shiftIncrement = m_increment;
360 if (cd.accumulatorFill <= shiftIncrement) {
361 if (m_debugLevel > 1) {
362 cerr << "reducing shift increment from " << shiftIncrement
363 << " to " << cd.accumulatorFill
364 << " and marking as last" << endl;
// The accumulator is about to empty: this is the channel's final chunk.
366 shiftIncrement = cd.accumulatorFill;
// Output-space check: account for the later resampling step inflating the
// frame count by 1/m_pitchScale.
373 int required = shiftIncrement;
375 if (m_pitchScale != 1.0) {
376 required = int(required / m_pitchScale) + 1;
379 if (cd.outbuf->getWriteSpace() < required) {
380 if (m_debugLevel > 0) {
381 cerr << "Buffer overrun on output for channel " << c << endl;
384 //!!! The only correct thing we can do here is resize the
385 // buffer. We can't wait for the client thread to read
386 // some data out from the buffer so as to make more space,
387 // because the client thread is probably stuck in a
388 // process() call waiting for us to stow away enough input
389 // increments to allow the process() call to complete.
394 writeChunk(c, shiftIncrement, last);
// Real-time-mode increment calculation: derive the next phase and shift
// increments from the onset-detection curve applied to the summed channel
// magnitudes, and track silence for silence-triggered phase resets.
399 RubberBandStretcher::Impl::calculateIncrements(size_t &phaseIncrementRtn,
400 size_t &shiftIncrementRtn,
403 Profiler profiler("RubberBandStretcher::Impl::calculateIncrements");
405 // cerr << "calculateIncrements" << endl;
407 // Calculate the next upcoming phase and shift increment, on the
408 // basis that both channels are in sync. This is in contrast to
409 // getIncrements, which requires that all the increments have been
410 // calculated in advance but can then return increments
411 // corresponding to different chunks in different channels.
413 // Requires frequency domain representations of channel data in
414 // the mag and phase buffers in the channel.
416 // This function is only used in real-time mode.
// Default both outputs to the analysis increment (ratio 1.0 behaviour).
418 phaseIncrementRtn = m_increment;
419 shiftIncrementRtn = m_increment;
422 if (m_channels == 0) return;
424 ChannelData &cd = *m_channelData[0];
// Sanity check: this routine assumes every channel is on the same chunk.
426 size_t bc = cd.chunkCount;
427 for (size_t c = 1; c < m_channels; ++c) {
428 if (m_channelData[c]->chunkCount != bc) {
429 cerr << "ERROR: RubberBandStretcher::Impl::calculateIncrements: Channels are not in sync" << endl;
434 const int hs = m_windowSize/2 + 1;
436 // Normally we would mix down the time-domain signal and apply a
437 // single FFT, or else mix down the Cartesian form of the
438 // frequency-domain signal. Both of those would be inefficient
439 // from this position. Fortunately, the onset detectors should
440 // work reasonably well (maybe even better?) if we just sum the
441 // magnitudes of the frequency-domain channel signals and forget
442 // about phase entirely. Normally we don't expect the channel
443 // phases to cancel each other, and broadband effects will still
// Mono shortcut: run the detection curves directly on channel 0's mags.
449 if (m_channels == 1) {
451 df = m_phaseResetAudioCurve->processDouble(cd.mag, m_increment);
452 silent = (m_silentAudioCurve->processDouble(cd.mag, m_increment) > 0.f);
// Multi-channel: sum magnitudes into a stack-allocated scratch buffer
// (alloca is cheap here but unchecked — size is hs doubles).
456 double *tmp = (double *)alloca(hs * sizeof(double));
458 for (int i = 0; i < hs; ++i) {
461 for (size_t c = 0; c < m_channels; ++c) {
462 for (int i = 0; i < hs; ++i) {
463 tmp[i] += m_channelData[c]->mag[i];
467 df = m_phaseResetAudioCurve->processDouble(tmp, m_increment);
468 silent = (m_silentAudioCurve->processDouble(tmp, m_increment) > 0.f);
471 int incr = m_stretchCalculator->calculateSingle
472 (getEffectiveRatio(), df, m_increment);
// Record the detection value and increment for later inspection/debugging.
474 m_lastProcessPhaseResetDf.write(&df, 1);
475 m_lastProcessOutputIncrements.write(&incr, 1);
482 // The returned increment is the phase increment. The shift
483 // increment for one chunk is the same as the phase increment for
484 // the following chunk (see comment below). This means we don't
485 // actually know the shift increment until we see the following
486 // phase increment... which is a bit of a problem.
488 // This implies we should use this increment for the shift
489 // increment, and make the following phase increment the same as
490 // it. This means in RT mode we'll be one chunk later with our
491 // phase reset than we would be in non-RT mode. The sensitivity
492 // of the broadband onset detector may mean that this isn't a
493 // problem -- test it and see.
495 shiftIncrementRtn = incr;
// First chunk has no previous increment; otherwise the phase increment is
// last chunk's shift increment, per the comment above.
497 if (cd.prevIncrement == 0) {
498 phaseIncrementRtn = shiftIncrementRtn;
500 phaseIncrementRtn = cd.prevIncrement;
503 cd.prevIncrement = shiftIncrementRtn;
// Count consecutive silent chunks; a full window of silence is a safe
// (inaudible) opportunity to reset phases.
505 if (silent) ++m_silentHistory;
506 else m_silentHistory = 0;
508 if (m_silentHistory >= int(m_windowSize / m_increment) && !phaseReset) {
510 if (m_debugLevel > 1) {
511 cerr << "calculateIncrements: phase reset on silence (silent history == "
512 << m_silentHistory << ")" << endl;
// Look up the precalculated phase and shift increments for the given
// channel's current chunk (offline path). A negative stored increment
// encodes a phase reset; out-of-range inputs fall back to m_increment.
518 RubberBandStretcher::Impl::getIncrements(size_t channel,
519 size_t &phaseIncrementRtn,
520 size_t &shiftIncrementRtn,
523 Profiler profiler("RubberBandStretcher::Impl::getIncrements");
// Defensive fallback for an invalid channel index.
525 if (channel >= m_channels) {
526 phaseIncrementRtn = m_increment;
527 shiftIncrementRtn = m_increment;
532 // There are two relevant output increments here. The first is
533 // the phase increment which we use when recalculating the phases
534 // for the current chunk; the second is the shift increment used
535 // to determine how far to shift the processing buffer after
536 // writing the chunk. The shift increment for one chunk is the
537 // same as the phase increment for the following chunk.
539 // When an onset occurs for which we need to reset phases, the
540 // increment given will be negative.
542 // When we reset phases, the previous shift increment (and so
543 // current phase increments) must have been m_increment to ensure
546 // m_outputIncrements stores phase increments.
548 ChannelData &cd = *m_channelData[channel];
// Past the end of the precalculated list: clamp to the last entry, or use
// defaults if nothing was ever calculated.
551 if (cd.chunkCount >= m_outputIncrements.size()) {
552 // cerr << "WARNING: RubberBandStretcher::Impl::getIncrements:"
553 // << " chunk count " << cd.chunkCount << " >= "
554 // << m_outputIncrements.size() << endl;
555 if (m_outputIncrements.size() == 0) {
556 phaseIncrementRtn = m_increment;
557 shiftIncrementRtn = m_increment;
561 cd.chunkCount = m_outputIncrements.size()-1;
566 int phaseIncrement = m_outputIncrements[cd.chunkCount];
// Shift increment = next chunk's phase increment (see comment above); for
// the final chunk reuse the current one.
568 int shiftIncrement = phaseIncrement;
569 if (cd.chunkCount + 1 < m_outputIncrements.size()) {
570 shiftIncrement = m_outputIncrements[cd.chunkCount + 1];
// Negative values flag a phase reset; take the magnitude as the increment.
573 if (phaseIncrement < 0) {
574 phaseIncrement = -phaseIncrement;
578 if (shiftIncrement < 0) {
579 shiftIncrement = -shiftIncrement;
// A shift of a whole window or more cannot be honoured; clamp loudly.
582 if (shiftIncrement >= int(m_windowSize)) {
583 cerr << "*** ERROR: RubberBandStretcher::Impl::processChunks: shiftIncrement " << shiftIncrement << " >= windowSize " << m_windowSize << " at " << cd.chunkCount << " (of " << m_outputIncrements.size() << ")" << endl;
584 shiftIncrement = m_windowSize;
587 phaseIncrementRtn = phaseIncrement;
588 shiftIncrementRtn = shiftIncrement;
589 if (cd.chunkCount == 0) phaseReset = true; // don't mess with the first chunk
// Analysis step: window the channel's time-domain chunk, rotate it for
// zero-phase FFT layout (with optional centre-padded oversampling), and
// produce polar (mag/phase) spectra in the channel data.
594 RubberBandStretcher::Impl::analyseChunk(size_t channel)
596 Profiler profiler("RubberBandStretcher::Impl::analyseChunk");
600 ChannelData &cd = *m_channelData[channel];
602 double *const R__ dblbuf = cd.dblbuf;
603 float *const R__ fltbuf = cd.fltbuf;
605 int sz = m_windowSize;
606 int hs = m_windowSize/2;
608 // cd.fltbuf is known to contain m_windowSize samples
// Apply the analysis window in place.
610 m_window->cut(fltbuf);
// Oversampled path: zero-pad symmetrically around the windowed chunk, then
// swap halves so the window centre lands at index 0 (zero-phase layout).
612 if (cd.oversample > 1) {
614 int bufsiz = sz * cd.oversample;
615 int offset = (bufsiz - sz) / 2;
619 for (i = 0; i < offset; ++i) {
622 for (i = 0; i < offset; ++i) {
623 dblbuf[bufsiz - i - 1] = 0.0;
625 for (i = 0; i < sz; ++i) {
626 dblbuf[offset + i] = fltbuf[i];
628 for (i = 0; i < bufsiz / 2; ++i) {
629 double tmp = dblbuf[i];
630 dblbuf[i] = dblbuf[i + bufsiz/2];
631 dblbuf[i + bufsiz/2] = tmp;
// Non-oversampled path: the half-swap alone achieves the same rotation.
634 for (i = 0; i < hs; ++i) {
635 dblbuf[i] = fltbuf[i + hs];
636 dblbuf[i + hs] = fltbuf[i];
640 cd.fft->forwardPolar(dblbuf, cd.mag, cd.phase);
// Floored modulo of x by y: result lies between 0 and y (carrying y's sign
// for y != 0), unlike the truncated remainder of fmod.
static inline double mod(double x, double y)
{
    return x - (y * std::floor(x / y));
}

// Principal argument: wrap an angle in radians into the range (-pi, pi].
// Using a negative modulus above yields mod(..) in (-2*pi, 0], hence the
// half-open interval ends at +pi rather than -pi.
static inline double princarg(double a)
{
    return mod(a + M_PI, -2.0 * M_PI) + M_PI;
}
// Phase-vocoder core: rescale each bin's phase for the new output
// increment, optionally propagating ("inheriting") phase adjustments from
// more stable neighbouring bins (laminar mode) and limiting phase locking
// to a band in transients-mixed mode.
647 RubberBandStretcher::Impl::modifyChunk(size_t channel,
648 size_t outputIncrement,
651 Profiler profiler("RubberBandStretcher::Impl::modifyChunk");
653 ChannelData &cd = *m_channelData[channel];
655 if (phaseReset && m_debugLevel > 1) {
656 cerr << "phase reset: leaving phases unmodified" << endl;
659 const double rate = m_sampleRate;
660 const int sz = m_windowSize;
661 const int count = (sz * cd.oversample) / 2;
// "unchanged" lets later stages skip work when phases pass through as-is.
663 bool unchanged = cd.unchanged && (outputIncrement == m_increment);
664 bool fullReset = phaseReset;
665 bool laminar = !(m_options & OptionPhaseIndependent);
666 bool bandlimited = (m_options & OptionTransientsMixed);
// Band edges (in bins) for the mixed-transients option: 150 Hz - 1 kHz.
667 int bandlow = lrint((150 * sz * cd.oversample) / rate);
668 int bandhigh = lrint((1000 * sz * cd.oversample) / rate);
670 float freq0 = m_freq0;
671 float freq1 = m_freq1;
672 float freq2 = m_freq2;
// At higher stretch ratios, raise the lowest crossover frequency (cubic in
// (r-1)) and scale the others proportionally to keep their ratios.
675 float r = getEffectiveRatio();
677 float rf0 = 600 + (600 * ((r-1)*(r-1)*(r-1)*2));
678 float f1ratio = freq1 / freq0;
679 float f2ratio = freq2 / freq0;
680 freq0 = std::max(freq0, rf0);
681 freq1 = freq0 * f1ratio;
682 freq2 = freq0 * f2ratio;
// Convert the crossover frequencies to bin limits and keep them ordered.
686 int limit0 = lrint((freq0 * sz * cd.oversample) / rate);
687 int limit1 = lrint((freq1 * sz * cd.oversample) / rate);
688 int limit2 = lrint((freq2 * sz * cd.oversample) / rate);
690 if (limit1 < limit0) limit1 = limit0;
691 if (limit2 < limit1) limit2 = limit1;
693 double prevInstability = 0.0;
694 bool prevDirection = false;
696 double distance = 0.0;
697 const double maxdist = 8.0;
699 const int lookback = 1;
701 double distacc = 0.0;
// Iterate bins top-down so each bin may inherit from the bin above it.
703 for (int i = count; i >= 0; i -= lookback) {
705 bool resetThis = phaseReset;
709 if (i > bandlow && i < bandhigh) {
716 double p = cd.phase[i];
// Maximum inheritance distance depends on which frequency band i is in.
721 if (i <= limit0) mi = 0.0;
722 else if (i <= limit1) mi = 1.0;
723 else if (i <= limit2) mi = 3.0;
// Expected phase advance for this bin over one analysis increment.
727 double omega = (2 * M_PI * m_increment * i) / (sz * cd.oversample);
729 double pp = cd.prevPhase[i];
730 double ep = pp + omega;
731 perr = princarg(p - ep);
// A bin whose phase error is changing steadily in one direction is
// considered unstable; stable bins become new inheritance anchors.
733 double instability = fabs(perr - cd.prevError[i]);
734 bool direction = (perr > cd.prevError[i]);
736 bool inherit = false;
739 if (distance >= mi || i == count) {
741 } else if (bandlimited && (i == bandhigh || i == bandlow)) {
743 } else if (instability > prevInstability &&
744 direction == prevDirection) {
// Phase advance scaled by the output/analysis increment ratio, blended
// with the inherited advance according to the inheritance distance.
749 double advance = outputIncrement * ((omega + perr) / m_increment);
753 cd.unwrappedPhase[i + lookback] - cd.prevPhase[i + lookback];
754 advance = ((advance * distance) +
755 (inherited * (maxdist - distance)))
757 outphase = p + advance;
761 outphase = cd.unwrappedPhase[i] + advance;
765 prevInstability = instability;
766 prevDirection = direction;
772 cd.prevError[i] = perr;
774 cd.phase[i] = outphase;
775 cd.unwrappedPhase[i] = outphase;
778 if (m_debugLevel > 1) {
779 cerr << "mean inheritance distance = " << distacc / count << endl;
782 if (fullReset) unchanged = true;
783 cd.unchanged = unchanged;
785 if (unchanged && m_debugLevel > 1) {
786 cerr << "frame unchanged on channel " << channel << endl;
// Formant preservation: estimate the spectral envelope by cepstral
// liftering, divide it out of the magnitudes, shift the envelope by the
// inverse of the pitch scale, and multiply it back in.
792 RubberBandStretcher::Impl::formantShiftChunk(size_t channel)
794 Profiler profiler("RubberBandStretcher::Impl::formantShiftChunk");
796 ChannelData &cd = *m_channelData[channel];
798 double *const R__ mag = cd.mag;
799 double *const R__ envelope = cd.envelope;
800 double *const R__ dblbuf = cd.dblbuf;
802 const int sz = m_windowSize;
803 const int hs = m_windowSize/2;
804 const double denom = sz;
// Cepstrum of the magnitude spectrum.
807 cd.fft->inverseCepstral(mag, dblbuf);
809 for (int i = 0; i < sz; ++i) {
// Lifter cutoff keeps only the slowly-varying (envelope) quefrencies;
// 700 here is a quefrency-domain tuning constant, not a frequency in Hz.
813 const int cutoff = m_sampleRate / 700;
815 // cerr <<"cutoff = "<< cutoff << ", m_sampleRate/cutoff = " << m_sampleRate/cutoff << endl;
818 dblbuf[cutoff-1] /= 2;
820 for (int i = cutoff; i < sz; ++i) {
// Forward FFT of the liftered cepstrum gives the log envelope; exponentiate.
824 cd.fft->forward(dblbuf, envelope, 0);
827 for (int i = 0; i <= hs; ++i) {
828 envelope[i] = exp(envelope[i]);
// Flatten the spectrum by removing the current envelope.
830 for (int i = 0; i <= hs; ++i) {
831 mag[i] /= envelope[i];
834 if (m_pitchScale > 1.0) {
835 // scaling up, we want a new envelope that is lower by the pitch factor
836 for (int target = 0; target <= hs; ++target) {
837 int source = lrint(target * m_pitchScale);
838 if (source > int(m_windowSize)) {
839 envelope[target] = 0.0;
841 envelope[target] = envelope[source];
845 // scaling down, we want a new envelope that is higher by the pitch factor
// Walk downwards so each target reads a source not yet overwritten.
846 for (int target = hs; target > 0; ) {
848 int source = lrint(target * m_pitchScale);
849 envelope[target] = envelope[source];
// Re-impose the shifted envelope on the flattened spectrum.
853 for (int i = 0; i <= hs; ++i) {
854 mag[i] *= envelope[i];
// Magnitudes were modified, so the pass-through optimisation is off.
857 cd.unchanged = false;
// Synthesis step: optional formant correction, inverse FFT of the modified
// polar spectrum, de-rotation back to natural layout, scaling, windowing,
// and overlap-add into the channel's accumulator.
861 RubberBandStretcher::Impl::synthesiseChunk(size_t channel)
863 Profiler profiler("RubberBandStretcher::Impl::synthesiseChunk");
866 if ((m_options & OptionFormantPreserved) &&
867 (m_pitchScale != 1.0)) {
868 formantShiftChunk(channel);
871 ChannelData &cd = *m_channelData[channel];
873 double *const R__ dblbuf = cd.dblbuf;
874 float *const R__ fltbuf = cd.fltbuf;
875 float *const R__ accumulator = cd.accumulator;
876 float *const R__ windowAccumulator = cd.windowAccumulator;
878 int sz = m_windowSize;
879 int hs = m_windowSize/2;
885 cd.fft->inversePolar(cd.mag, cd.phase, cd.dblbuf);
// Undo the zero-phase rotation applied in analyseChunk; oversampled chunks
// also drop the centre padding when copying back to fltbuf.
887 if (cd.oversample > 1) {
889 int bufsiz = sz * cd.oversample;
890 int hbs = hs * cd.oversample;
891 int offset = (bufsiz - sz) / 2;
893 for (i = 0; i < hbs; ++i) {
894 double tmp = dblbuf[i];
895 dblbuf[i] = dblbuf[i + hbs];
896 dblbuf[i + hbs] = tmp;
898 for (i = 0; i < sz; ++i) {
899 fltbuf[i] = float(dblbuf[i + offset]);
902 for (i = 0; i < hs; ++i) {
903 fltbuf[i] = float(dblbuf[i + hs]);
905 for (i = 0; i < hs; ++i) {
906 fltbuf[i + hs] = float(dblbuf[i]);
910 float denom = float(sz * cd.oversample);
912 // our ffts produced unscaled results
913 for (i = 0; i < sz; ++i) {
914 fltbuf[i] = fltbuf[i] / denom;
// Apply the synthesis window, then overlap-add into the accumulator.
918 m_window->cut(fltbuf);
920 for (i = 0; i < sz; ++i) {
921 accumulator[i] += fltbuf[i];
924 cd.accumulatorFill = m_windowSize;
// Track accumulated window weight so writeChunk can normalise; the 1.5
// factor is an empirical gain correction tied to the window area.
926 float fixed = m_window->getArea() * 1.5f;
928 for (i = 0; i < sz; ++i) {
929 float val = m_window->getValue(i);
930 windowAccumulator[i] += val * fixed;
// Emit shiftIncrement frames from the channel's accumulator: normalise by
// the window accumulator, resample if pitch shifting happens here, write
// to the output ring buffer, then slide both accumulators along.
935 RubberBandStretcher::Impl::writeChunk(size_t channel, size_t shiftIncrement, bool last)
937 Profiler profiler("RubberBandStretcher::Impl::writeChunk");
939 ChannelData &cd = *m_channelData[channel];
941 float *const R__ accumulator = cd.accumulator;
942 float *const R__ windowAccumulator = cd.windowAccumulator;
944 const int sz = m_windowSize;
945 const int si = shiftIncrement;
949 if (m_debugLevel > 2) {
950 cerr << "writeChunk(" << channel << ", " << shiftIncrement << ", " << last << ")" << endl;
// Normalise the overlap-add output by the accumulated window weight.
953 for (i = 0; i < si; ++i) {
954 if (windowAccumulator[i] > 0.f) {
955 accumulator[i] /= windowAccumulator[i];
959 // for exact sample scaling (probably not meaningful if we
960 // were running in RT mode)
961 size_t theoreticalOut = 0;
962 if (cd.inputSize >= 0) {
963 theoreticalOut = lrint(cd.inputSize * m_timeRatio);
// If resampling was not done on input, do it here on the way out.
966 bool resampledAlready = resampleBeforeStretching();
968 if (!resampledAlready &&
969 (m_pitchScale != 1.0 || m_options & OptionPitchHighConsistency) &&
972 size_t reqSize = int(ceil(si / m_pitchScale));
973 if (reqSize > cd.resamplebufSize) {
974 // This shouldn't normally happen -- the buffer is
975 // supposed to be initialised with enough space in the
976 // first place. But we retain this check in case the
977 // pitch scale has changed since then, or the stretch
978 // calculator has gone mad, or something.
979 cerr << "WARNING: RubberBandStretcher::Impl::writeChunk: resizing resampler buffer from "
980 << cd.resamplebufSize << " to " << reqSize << endl;
981 cd.setResampleBufSize(reqSize);
985 size_t outframes = cd.resampler->resample(&cd.accumulator,
992 writeOutput(*cd.outbuf, cd.resamplebuf,
993 outframes, cd.outCount, theoreticalOut);
// No resampling needed: the accumulator head goes straight to the output.
996 writeOutput(*cd.outbuf, accumulator,
997 si, cd.outCount, theoreticalOut);
// Shift both accumulators left by si and zero the vacated tail.
1000 for (i = 0; i < sz - si; ++i) {
1001 accumulator[i] = accumulator[i + si];
1004 for (i = sz - si; i < sz; ++i) {
1005 accumulator[i] = 0.0f;
1008 for (i = 0; i < sz - si; ++i) {
1009 windowAccumulator[i] = windowAccumulator[i + si];
1012 for (i = sz - si; i < sz; ++i) {
1013 windowAccumulator[i] = 0.0f;
1016 if (int(cd.accumulatorFill) > si) {
1017 cd.accumulatorFill -= si;
// Accumulator drained completely: the channel's output is finished.
1019 cd.accumulatorFill = 0;
1021 if (m_debugLevel > 1) {
1022 cerr << "RubberBandStretcher::Impl::processChunks: setting outputComplete to true" << endl;
1024 cd.outputComplete = true;
// Write qty frames to the output ring buffer, discarding the initial
// startSkip pre-padding frames (non-RT only) and truncating at the
// theoretical output length when one is known. Updates outCount.
1030 RubberBandStretcher::Impl::writeOutput(RingBuffer<float> &to, float *from, size_t qty, size_t &outCount, size_t theoreticalOut)
1032 Profiler profiler("RubberBandStretcher::Impl::writeOutput");
1034 // In non-RT mode, we don't want to write the first startSkip
1035 // samples, because the first chunk is centred on the start of the
1036 // output. In RT mode we didn't apply any pre-padding in
1037 // configure(), so we don't want to remove any here.
1039 size_t startSkip = 0;
// Half a window, scaled for the pitch resampler's frame-count change.
1041 startSkip = lrintf((m_windowSize/2) / m_pitchScale);
1044 if (outCount > startSkip) {
1046 // this is the normal case
// With a known target length, clip the final write so total output is
// exactly theoreticalOut frames after the skip.
1048 if (theoreticalOut > 0) {
1049 if (m_debugLevel > 1) {
1050 cerr << "theoreticalOut = " << theoreticalOut
1051 << ", outCount = " << outCount
1052 << ", startSkip = " << startSkip
1053 << ", qty = " << qty << endl;
1055 if (outCount - startSkip <= theoreticalOut &&
1056 outCount - startSkip + qty > theoreticalOut) {
1057 qty = theoreticalOut - (outCount - startSkip);
1058 if (m_debugLevel > 1) {
1059 cerr << "reduce qty to " << qty << endl;
1064 if (m_debugLevel > 2) {
1065 cerr << "writing " << qty << endl;
1068 size_t written = to.write(from, qty);
1070 if (written < qty) {
1071 cerr << "WARNING: RubberBandStretcher::Impl::writeOutput: "
1072 << "Buffer overrun on output: wrote " << written
1073 << " of " << qty << " samples" << endl;
1076 outCount += written;
1080 // the rest of this is only used during the first startSkip samples
// Entire write still falls inside the pre-padding: drop it.
1082 if (outCount + qty <= startSkip) {
1083 if (m_debugLevel > 1) {
1084 cerr << "qty = " << qty << ", startSkip = "
1085 << startSkip << ", outCount = " << outCount
1086 << ", discarding" << endl;
// Straddling the pre-padding boundary: skip the padded head, write the rest.
1092 size_t off = startSkip - outCount;
1093 if (m_debugLevel > 1) {
1094 cerr << "qty = " << qty << ", startSkip = "
1095 << startSkip << ", outCount = " << outCount
1096 << ", writing " << qty - off
1097 << " from start offset " << off << endl;
1099 to.write(from + off, qty - off);
// Report how many output frames can currently be retrieved: the minimum
// read space across channels, -1 once all output is complete and consumed.
// May process leftover chunks itself when the input is already complete.
1104 RubberBandStretcher::Impl::available() const
1106 Profiler profiler("RubberBandStretcher::Impl::available");
1109 MutexLocker locker(&m_threadSetMutex);
1110 if (m_channelData.empty()) return 0;
// NOTE(review): this looks like a duplicate of the check above — in the
// full source the two checks likely sit on different branches (with and
// without the thread-set mutex); confirm against the unabridged file.
1112 if (m_channelData.empty()) return 0;
1116 for (size_t c = 0; c < m_channels; ++c) {
// Input size known (>= 0) means all input delivered; process stragglers.
1117 if (m_channelData[c]->inputSize >= 0) {
1118 // cerr << "available: m_done true" << endl;
1119 if (m_channelData[c]->inbuf->getReadSpace() > 0) {
1120 // cerr << "calling processChunks(" << c << ") from available" << endl;
1121 //!!! do we ever actually do this? if so, this method should not be const
1122 // ^^^ yes, we do sometimes -- e.g. when fed a very short file
1123 bool any = false, last = false;
// Const is cast away here because processChunks mutates channel state.
1124 ((RubberBandStretcher::Impl *)this)->processChunks(c, any, last);
1131 bool consumed = true;
1132 bool haveResamplers = false;
1134 for (size_t i = 0; i < m_channels; ++i) {
1135 size_t availIn = m_channelData[i]->inbuf->getReadSpace();
1136 size_t availOut = m_channelData[i]->outbuf->getReadSpace();
1137 if (m_debugLevel > 2) {
1138 cerr << "available on channel " << i << ": " << availOut << " (waiting: " << availIn << ")" << endl;
1140 if (i == 0 || availOut < min) min = availOut;
1141 if (!m_channelData[i]->outputComplete) consumed = false;
1142 if (m_channelData[i]->resampler) haveResamplers = true;
// -1 signals end-of-stream: nothing left and all channels complete.
1145 if (min == 0 && consumed) return -1;
1146 if (m_pitchScale == 1.0) return min;
1148 if (haveResamplers) return min; // resampling has already happened
// Otherwise output will shrink/grow by the pitch factor when resampled.
1149 return int(floor(min / m_pitchScale));
1153 RubberBandStretcher::Impl::retrieve(float *const *output, size_t samples) const
1155 Profiler profiler("RubberBandStretcher::Impl::retrieve");
1157 size_t got = samples;
1159 for (size_t c = 0; c < m_channels; ++c) {
1160 size_t gotHere = m_channelData[c]->outbuf->read(output[c], got);
1161 if (gotHere < got) {
1163 if (m_debugLevel > 0) {
1164 cerr << "RubberBandStretcher::Impl::retrieve: WARNING: channel imbalance detected" << endl;