/*
    Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
23 #include "ffmpeg_decoder.h"
24 #include "video_decoder.h"
25 #include "audio_decoder.h"
26 #include "audio_buffers.h"
27 #include "audio_content.h"
28 #include "ffmpeg_content.h"
29 #include "image_decoder.h"
30 #include "content_audio.h"
31 #include "image_content.h"
32 #include "subtitle_content.h"
33 #include "text_subtitle_decoder.h"
34 #include "text_subtitle_content.h"
35 #include "video_mxf_decoder.h"
36 #include "video_mxf_content.h"
37 #include "dcp_content.h"
40 #include "raw_image_proxy.h"
43 #include "render_subtitles.h"
45 #include "content_video.h"
46 #include "player_video.h"
47 #include "frame_rate_change.h"
48 #include "dcp_content.h"
49 #include "dcp_decoder.h"
50 #include "dcp_subtitle_content.h"
51 #include "dcp_subtitle_decoder.h"
52 #include "audio_processor.h"
54 #include "referenced_reel_asset.h"
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <boost/foreach.hpp>
66 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
78 using boost::shared_ptr;
79 using boost::weak_ptr;
80 using boost::dynamic_pointer_cast;
81 using boost::optional;
82 using boost::scoped_ptr;
85 has_video (Content* c)
87 return static_cast<bool>(c->video);
91 has_audio (Content* c)
93 return static_cast<bool>(c->audio);
97 has_subtitle (Content* c)
99 return static_cast<bool>(c->subtitle);
102 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
104 , _playlist (playlist)
105 , _have_valid_pieces (false)
106 , _ignore_video (false)
107 , _ignore_audio (false)
108 , _always_burn_subtitles (false)
110 , _play_referenced (false)
112 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
113 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
114 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
115 set_video_container_size (_film->frame_size ());
117 film_changed (Film::AUDIO_PROCESSOR);
121 Player::setup_pieces ()
123 list<shared_ptr<Piece> > old_pieces = _pieces;
126 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
128 if (!i->paths_valid ()) {
132 shared_ptr<Decoder> decoder;
133 optional<FrameRateChange> frc;
136 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (i);
138 decoder.reset (new FFmpegDecoder (fc, _film->log(), _fast));
139 frc = FrameRateChange (fc->active_video_frame_rate(), _film->video_frame_rate());
142 shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
144 decoder.reset (new DCPDecoder (dc, _film->log(), _fast));
145 frc = FrameRateChange (dc->active_video_frame_rate(), _film->video_frame_rate());
149 shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (i);
151 /* See if we can re-use an old ImageDecoder */
152 for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
153 shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
154 if (imd && imd->content() == ic) {
160 decoder.reset (new ImageDecoder (ic, _film->log()));
163 frc = FrameRateChange (ic->active_video_frame_rate(), _film->video_frame_rate());
166 /* It's questionable whether subtitle content should have a video frame rate; perhaps
167 it should be assumed that any subtitle content has been prepared at the same rate
168 as simultaneous video content (like we do with audio).
171 /* TextSubtitleContent */
172 shared_ptr<const TextSubtitleContent> rc = dynamic_pointer_cast<const TextSubtitleContent> (i);
174 decoder.reset (new TextSubtitleDecoder (rc));
175 frc = FrameRateChange (rc->active_video_frame_rate(), _film->video_frame_rate());
178 /* DCPSubtitleContent */
179 shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (i);
181 decoder.reset (new DCPSubtitleDecoder (dsc));
182 frc = FrameRateChange (dsc->active_video_frame_rate(), _film->video_frame_rate());
185 /* VideoMXFContent */
186 shared_ptr<const VideoMXFContent> vmc = dynamic_pointer_cast<const VideoMXFContent> (i);
188 decoder.reset (new VideoMXFDecoder (vmc, _film->log()));
189 frc = FrameRateChange (vmc->active_video_frame_rate(), _film->video_frame_rate());
193 /* Not something that we can decode; e.g. Atmos content */
197 if (decoder->video && _ignore_video) {
198 decoder->video->set_ignore ();
201 if (decoder->audio && _ignore_audio) {
202 decoder->audio->set_ignore ();
205 _pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
208 _have_valid_pieces = true;
212 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
214 shared_ptr<Content> c = w.lock ();
220 property == ContentProperty::POSITION ||
221 property == ContentProperty::LENGTH ||
222 property == ContentProperty::TRIM_START ||
223 property == ContentProperty::TRIM_END ||
224 property == ContentProperty::PATH ||
225 property == VideoContentProperty::FRAME_TYPE ||
226 property == DCPContentProperty::CAN_BE_PLAYED ||
227 property == SubtitleContentProperty::COLOUR ||
228 property == SubtitleContentProperty::OUTLINE ||
229 property == SubtitleContentProperty::OUTLINE_COLOUR ||
230 property == FFmpegContentProperty::SUBTITLE_STREAM
233 _have_valid_pieces = false;
237 property == ContentProperty::VIDEO_FRAME_RATE ||
238 property == SubtitleContentProperty::USE ||
239 property == SubtitleContentProperty::X_OFFSET ||
240 property == SubtitleContentProperty::Y_OFFSET ||
241 property == SubtitleContentProperty::X_SCALE ||
242 property == SubtitleContentProperty::Y_SCALE ||
243 property == SubtitleContentProperty::FONTS ||
244 property == VideoContentProperty::CROP ||
245 property == VideoContentProperty::SCALE ||
246 property == VideoContentProperty::FADE_IN ||
247 property == VideoContentProperty::FADE_OUT ||
248 property == VideoContentProperty::COLOUR_CONVERSION
256 Player::set_video_container_size (dcp::Size s)
258 _video_container_size = s;
260 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
261 _black_image->make_black ();
265 Player::playlist_changed ()
267 _have_valid_pieces = false;
272 Player::film_changed (Film::Property p)
274 /* Here we should notice Film properties that affect our output, and
275 alert listeners that our output now would be different to how it was
276 last time we were run.
279 if (p == Film::CONTAINER) {
281 } else if (p == Film::VIDEO_FRAME_RATE) {
282 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
283 so we need new pieces here.
285 _have_valid_pieces = false;
287 } else if (p == Film::AUDIO_PROCESSOR) {
288 if (_film->audio_processor ()) {
289 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
295 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
297 list<PositionImage> all;
299 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
304 /* We will scale the subtitle up to fit _video_container_size */
305 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
307 /* Then we need a corrective translation, consisting of two parts:
309 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
310 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
312 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
313 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
314 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
316 * Combining these two translations gives these expressions.
323 dcp::YUV_TO_RGB_REC601,
324 i->image->pixel_format (),
329 lrint (_video_container_size.width * i->rectangle.x),
330 lrint (_video_container_size.height * i->rectangle.y)
339 shared_ptr<PlayerVideo>
340 Player::black_player_video_frame (DCPTime time) const
342 return shared_ptr<PlayerVideo> (
344 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
348 _video_container_size,
349 _video_container_size,
352 PresetColourConversion::all().front().conversion
/** @return All PlayerVideos at the given time. There may be none if the content
 * at `time' is a DCP which we are passing through (i.e. referring to by reference)
 * or 2 if we have 3D.
 */
list<shared_ptr<PlayerVideo> >
Player::get_video (DCPTime time, bool accurate)
/* NOTE(review): this extract appears to be missing structural lines (braces,
   else arms, argument lines); the code tokens below are left untouched. */
if (!_have_valid_pieces) {
/* Find subtitles for possible burn-in */
PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true, accurate);
list<PositionImage> sub_images;
/* Image subtitles */
list<PositionImage> c = transform_image_subtitles (ps.image);
copy (c.begin(), c.end(), back_inserter (sub_images));
/* Text subtitles (rendered to an image) */
if (!ps.text.empty ()) {
list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
copy (s.begin (), s.end (), back_inserter (sub_images));
/* Merge all subtitle images into one, if there are any */
optional<PositionImage> subtitles;
if (!sub_images.empty ()) {
subtitles = merge (sub_images);
/* Find pieces containing video which is happening now */
list<shared_ptr<Piece> > ov = overlaps (
time + DCPTime::from_frames (1, _film->video_frame_rate ()),
list<shared_ptr<PlayerVideo> > pvf;
/* No video content at this time */
pvf.push_back (black_player_video_frame (time));
/* Some video content at this time */
shared_ptr<Piece> last = *(ov.rbegin ());
VideoFrameType const last_type = last->content->video->frame_type ();
/* Get video from appropriate piece(s) */
BOOST_FOREACH (shared_ptr<Piece> piece, ov) {
shared_ptr<VideoDecoder> decoder = piece->decoder->video;
DCPOMATIC_ASSERT (decoder);
/* Skip DCP content that is being referenced rather than played */
shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (piece->content);
if (dcp_content && dcp_content->reference_video () && !_play_referenced) {
/* always use the last video */
/* with a corresponding L/R eye if appropriate */
(last_type == VIDEO_FRAME_TYPE_3D_LEFT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
(last_type == VIDEO_FRAME_TYPE_3D_RIGHT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);
/* We want to use this piece */
list<ContentVideo> content_video = decoder->get (dcp_to_content_video (piece, time), accurate);
if (content_video.empty ()) {
pvf.push_back (black_player_video_frame (time));
/* Work out the size the content's video will be scaled to inside the container */
dcp::Size image_size = piece->content->video->scale().size (
piece->content->video, _video_container_size, _film->frame_size ()
for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
shared_ptr<PlayerVideo> (
content_video_to_dcp (piece, i->frame),
piece->content->video->crop (),
piece->content->video->fade (i->frame),
_video_container_size,
piece->content->video->colour_conversion ()
/* Discard unused video */
decoder->get (dcp_to_content_video (piece, time), accurate);
/* Attach the merged subtitle image to every frame we are returning */
BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
p->set_subtitle (subtitles.get ());
/** @return Audio data or 0 if the only audio data here is referenced DCP data */
shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
/* NOTE(review): this extract appears to be missing structural lines (braces,
   argument lines); the code tokens below are left untouched. */
if (!_have_valid_pieces) {
/* Number of audio frames corresponding to `length' */
Frame const length_frames = length.frames_round (_film->audio_frame_rate ());
/* Start from silence; each piece's audio is mixed into this */
shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
audio->make_silent ();
/* Pieces with audio overlapping the requested period */
list<shared_ptr<Piece> > ov = overlaps (time, time + length, has_audio);
/* If every piece here is referenced DCP audio there is nothing for us to produce */
bool all_referenced = true;
BOOST_FOREACH (shared_ptr<Piece> i, ov) {
shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (i->content);
if (i->content->audio && (!dcp_content || !dcp_content->reference_audio ())) {
/* There is audio content which is not from a DCP or not set to be referenced */
all_referenced = false;
if (all_referenced && !_play_referenced) {
return shared_ptr<AudioBuffers> ();
BOOST_FOREACH (shared_ptr<Piece> i, ov) {
DCPOMATIC_ASSERT (i->content->audio);
shared_ptr<AudioDecoder> decoder = i->decoder->audio;
DCPOMATIC_ASSERT (decoder);
/* The time that we should request from the content */
DCPTime request = time - DCPTime::from_seconds (i->content->audio->delay() / 1000.0);
Frame request_frames = length_frames;
if (request < DCPTime ()) {
/* We went off the start of the content, so we will need to offset
the stuff we get back.
*/
request_frames += request.frames_round (_film->audio_frame_rate ());
if (request_frames < 0) {
request = DCPTime ();
/* Position in the content's resampled audio corresponding to `request' */
Frame const content_frame = dcp_to_resampled_audio (i, request);
BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams ()) {
if (j->channels() == 0) {
/* Some content (e.g. DCPs) can have streams with no channels */
/* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
ContentAudio all = decoder->get (j, content_frame, request_frames, accurate);
/* Apply the content's gain, if it has any */
if (i->content->audio->gain() != 0) {
shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
gain->apply_gain (i->content->audio->gain ());
/* Remap the stream's channels into the film's channel layout */
shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
dcp_mapped->make_silent ();
AudioMapping map = j->mapping ();
for (int i = 0; i < map.input_channels(); ++i) {
for (int j = 0; j < _film->audio_channels(); ++j) {
if (map.get (i, j) > 0) {
dcp_mapped->accumulate_channel (
/* Run the film's audio processor, if there is one */
if (_audio_processor) {
dcp_mapped = _audio_processor->run (dcp_mapped, _film->audio_channels ());
all.audio = dcp_mapped;
/* Mix this piece's audio into the output buffer at the right offset */
audio->accumulate_frames (
content_frame - all.frame,
offset.frames_round (_film->audio_frame_rate()),
min (Frame (all.audio->frames()), request_frames)
577 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
579 DCPTime s = t - piece->content->position ();
580 s = min (piece->content->length_after_trim(), s);
581 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
583 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
584 then convert that ContentTime to frames at the content's rate. However this fails for
585 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
586 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
588 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
590 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
594 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
596 /* See comment in dcp_to_content_video */
597 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
598 return max (DCPTime (), d + piece->content->position ());
602 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
604 DCPTime s = t - piece->content->position ();
605 s = min (piece->content->length_after_trim(), s);
606 /* See notes in dcp_to_content_video */
607 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
611 Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
613 DCPTime s = t - piece->content->position ();
614 s = min (piece->content->length_after_trim(), s);
615 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
619 Player::content_subtitle_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
621 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** @param burnt true to return only subtitles to be burnt, false to return only
 * subtitles that should not be burnt. This parameter will be ignored if
 * _always_burn_subtitles is true; in this case, all subtitles will be returned.
 */
Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt, bool accurate)
/* NOTE(review): this extract appears to be missing structural lines (return
   type, braces, continue statements); the code tokens below are left untouched. */
list<shared_ptr<Piece> > subs = overlaps (time, time + length, has_subtitle);
PlayerSubtitles ps (time, length);
for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
/* Skip subtitles that are disabled, or whose burn setting does not match `burnt' */
if (!(*j)->content->subtitle->use () || (!_always_burn_subtitles && (burnt != (*j)->content->subtitle->burn ()))) {
/* Skip DCP subtitles that are being referenced rather than played */
shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> ((*j)->content);
if (dcp_content && dcp_content->reference_subtitle () && !_play_referenced) {
shared_ptr<SubtitleDecoder> subtitle_decoder = (*j)->decoder->subtitle;
ContentTime const from = dcp_to_content_subtitle (*j, time);
/* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
list<ContentImageSubtitle> image = subtitle_decoder->get_image (ContentTimePeriod (from, to), starting, accurate);
for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
/* Apply content's subtitle offsets */
i->sub.rectangle.x += (*j)->content->subtitle->x_offset ();
i->sub.rectangle.y += (*j)->content->subtitle->y_offset ();
/* Apply content's subtitle scale */
i->sub.rectangle.width *= (*j)->content->subtitle->x_scale ();
i->sub.rectangle.height *= (*j)->content->subtitle->y_scale ();
/* Apply a corrective translation to keep the subtitle centred after that scale */
i->sub.rectangle.x -= i->sub.rectangle.width * ((*j)->content->subtitle->x_scale() - 1);
i->sub.rectangle.y -= i->sub.rectangle.height * ((*j)->content->subtitle->y_scale() - 1);
ps.image.push_back (i->sub);
/* Text subtitles: apply offsets/scales to each SubtitleString */
list<ContentTextSubtitle> text = subtitle_decoder->get_text (ContentTimePeriod (from, to), starting, accurate);
BOOST_FOREACH (ContentTextSubtitle& ts, text) {
BOOST_FOREACH (dcp::SubtitleString s, ts.subs) {
s.set_h_position (s.h_position() + (*j)->content->subtitle->x_offset ());
s.set_v_position (s.v_position() + (*j)->content->subtitle->y_offset ());
float const xs = (*j)->content->subtitle->x_scale();
float const ys = (*j)->content->subtitle->y_scale();
float size = s.size();
/* Adjust size to express the common part of the scaling;
e.g. if xs = ys = 0.5 we scale size by 2.
*/
if (xs > 1e-5 && ys > 1e-5) {
size *= 1 / min (1 / xs, 1 / ys);
/* Then express aspect ratio changes */
if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
s.set_aspect_adjust (xs / ys);
/* Convert the subtitle's period into DCP time (dcp::Time with millisecond accuracy) */
s.set_in (dcp::Time(content_subtitle_to_dcp (*j, ts.period().from).seconds(), 1000));
s.set_out (dcp::Time(content_subtitle_to_dcp (*j, ts.period().to).seconds(), 1000));
ps.text.push_back (s);
ps.add_fonts ((*j)->content->subtitle->fonts ());
700 list<shared_ptr<Font> >
701 Player::get_subtitle_fonts ()
703 if (!_have_valid_pieces) {
707 list<shared_ptr<Font> > fonts;
708 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
709 if (p->content->subtitle) {
710 /* XXX: things may go wrong if there are duplicate font IDs
711 with different font files.
713 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
714 copy (f.begin(), f.end(), back_inserter (fonts));
721 /** Set this player never to produce any video data */
723 Player::set_ignore_video ()
725 _ignore_video = true;
728 /** Set this player never to produce any audio data */
730 Player::set_ignore_audio ()
732 _ignore_audio = true;
735 /** Set whether or not this player should always burn text subtitles into the image,
736 * regardless of the content settings.
737 * @param burn true to always burn subtitles, false to obey content settings.
740 Player::set_always_burn_subtitles (bool burn)
742 _always_burn_subtitles = burn;
749 _have_valid_pieces = false;
753 Player::set_play_referenced ()
755 _play_referenced = true;
756 _have_valid_pieces = false;
/** @return Reel assets from DCP content which is set to be referenced
 *  (passed through into the output DCP) rather than re-encoded.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
/* NOTE(review): this extract appears to be missing structural lines (braces,
   continue statements, push_back arguments); the code tokens below are left
   untouched. */
list<ReferencedReelAsset> a;
BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
scoped_ptr<DCPDecoder> decoder;
decoder.reset (new DCPDecoder (j, _film->log(), false));
/* offset is accumulated in video frames across the DCP's reels */
BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
if (j->reference_video ()) {
ReferencedReelAsset (
DCPTimePeriod (from, from + DCPTime::from_frames (k->main_picture()->duration(), _film->video_frame_rate()))
if (j->reference_audio ()) {
ReferencedReelAsset (
DCPTimePeriod (from, from + DCPTime::from_frames (k->main_sound()->duration(), _film->video_frame_rate()))
if (j->reference_subtitle ()) {
DCPOMATIC_ASSERT (k->main_subtitle ());
ReferencedReelAsset (
DCPTimePeriod (from, from + DCPTime::from_frames (k->main_subtitle()->duration(), _film->video_frame_rate()))
/* Assume that main picture duration is the length of the reel */
offset += k->main_picture()->duration ();
816 list<shared_ptr<Piece> >
817 Player::overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid)
819 if (!_have_valid_pieces) {
823 list<shared_ptr<Piece> > overlaps;
824 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
825 if (valid (i->content.get ()) && i->content->position() < to && i->content->end() > from) {
826 overlaps.push_back (i);