_peaks_built = false;
_peak_byte_max = 0;
_peakfile_descriptor = 0;
- _read_data_count = 0;
- _write_data_count = 0;
peak_leftover_cnt = 0;
peak_leftover_size = 0;
peak_leftovers = 0;
_peaks_built = false;
_peak_byte_max = 0;
_peakfile_descriptor = 0;
- _read_data_count = 0;
- _write_data_count = 0;
peak_leftover_cnt = 0;
peak_leftover_size = 0;
peak_leftovers = 0;
/** Read @a cnt samples from this source, starting at @a start, into @a dst.
 *
 *  Takes the source lock for the duration of the read and defers the
 *  actual work to read_unlocked().  The channel argument is unused here
 *  (this source is single-channel from the caller's point of view).
 *
 *  @return the number of samples actually read, as reported by
 *  read_unlocked().
 */
framecnt_t
AudioSource::read (Sample *dst, framepos_t start, framecnt_t cnt, int /*channel*/) const
{
	/* a negative count is a caller bug, not a runtime condition */
	assert (cnt >= 0);

	Glib::Mutex::Lock lm (_lock);
	return read_unlocked (dst, start, cnt);
}
*/
const framecnt_t chunksize = (framecnt_t) min (expected_peaks, 65536.0);
-
+
staging = new PeakData[chunksize];
/* compute the rounded up frame position */
adjusting zero_fill and npeaks and then breaking out of
this loop early
*/
-
+
memset (raw_staging, 0, sizeof (Sample) * chunksize);
-
+
} else {
-
+
to_read = min (chunksize, (_length - current_frame));
-
-
+
+
if ((frames_read = read_unlocked (raw_staging, current_frame, to_read)) == 0) {
error << string_compose(_("AudioSource[%1]: peak read - cannot read %2 samples at offset %3 of %4 (%5)"),
_name, to_read, current_frame, _length, strerror (errno))
goto out;
}
}
-
+
i = 0;
}
framecnt_t frames_to_read = min (bufsize, cnt);
framecnt_t frames_read;
-
+
if ((frames_read = read_unlocked (buf, current_frame, frames_to_read)) != frames_to_read) {
error << string_compose(_("%1: could not write read raw data for peak computation (%2)"), _name, strerror (errno)) << endmsg;
done_with_peakfile_writes (false);
off_t target_length = blocksize * ((first_peak_byte + blocksize + 1) / blocksize);
if (endpos < target_length) {
- (void) ftruncate (_peakfile_fd, target_length);
- /* error doesn't actually matter though, so continue on without testing */
+ if (ftruncate (_peakfile_fd, target_length)) {
+ /* error doesn't actually matter so continue on without testing */
+ }
}
}
off_t end = lseek (_peakfile_fd, 0, SEEK_END);
if (end > _peak_byte_max) {
- (void) ftruncate (_peakfile_fd, _peak_byte_max);
+ if (ftruncate (_peakfile_fd, _peak_byte_max)) {
+ error << string_compose (_("could not truncate peakfile %1 to %2 (error: %3)"),
+ peakpath, _peak_byte_max, errno) << endmsg;
+ }
}
}
return (end/sizeof(PeakData)) * _FPP;
}
/* NOTE(review): AudioSource::dec_read_data_count() was removed here
 * (unresolved deletion hunk in the original text).  Per-source
 * read/write byte counting (_read_data_count / _write_data_count) has
 * been dropped, so this helper no longer has anything to update.
 */
void
AudioSource::mark_streaming_write_completed ()
{
AudioSource::allocate_working_buffers (framecnt_t framerate)
{
Glib::Mutex::Lock lm (_level_buffer_lock);
-
-
+
+
/* Note: we don't need any buffers allocated until
a level 1 audiosource is created, at which
time we'll call ::ensure_buffers_for_level()
with the right value and do the right thing.
*/
-
+
if (!_mixdown_buffers.empty()) {
ensure_buffers_for_level_locked ( _mixdown_buffers.size(), framerate);
}
}
void
-AudioSource::ensure_buffers_for_level (uint32_t level, framecnt_t frame_rate)
+AudioSource::ensure_buffers_for_level (uint32_t level, framecnt_t frame_rate)
{
Glib::Mutex::Lock lm (_level_buffer_lock);
ensure_buffers_for_level_locked (level, frame_rate);
}
void
-AudioSource::ensure_buffers_for_level_locked (uint32_t level, framecnt_t frame_rate)
+AudioSource::ensure_buffers_for_level_locked (uint32_t level, framecnt_t frame_rate)
{
framecnt_t nframes = (framecnt_t) floor (Config->get_audio_playback_buffer_seconds() * frame_rate);
_mixdown_buffers.clear ();
_gain_buffers.clear ();
- cerr << "Allocating nested buffers for level " << level << endl;
-
while (_mixdown_buffers.size() < level) {
_mixdown_buffers.push_back (boost::shared_ptr<Sample> (new Sample[nframes]));
_gain_buffers.push_back (boost::shared_ptr<gain_t> (new gain_t[nframes]));