2 Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include "ffmpeg_decoder.h"
23 #include "video_decoder.h"
24 #include "audio_decoder.h"
25 #include "audio_buffers.h"
26 #include "audio_content.h"
27 #include "ffmpeg_content.h"
28 #include "image_decoder.h"
29 #include "content_audio.h"
30 #include "image_content.h"
31 #include "subtitle_content.h"
32 #include "text_subtitle_decoder.h"
33 #include "text_subtitle_content.h"
34 #include "video_mxf_decoder.h"
35 #include "video_mxf_content.h"
36 #include "dcp_content.h"
39 #include "raw_image_proxy.h"
42 #include "render_subtitles.h"
44 #include "content_video.h"
45 #include "player_video.h"
46 #include "frame_rate_change.h"
47 #include "dcp_content.h"
48 #include "dcp_decoder.h"
49 #include "dcp_subtitle_content.h"
50 #include "dcp_subtitle_decoder.h"
51 #include "audio_processor.h"
53 #include "referenced_reel_asset.h"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <boost/foreach.hpp>
/* Convenience macro: write a general-category entry to this Player's Film log */
#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
77 using boost::shared_ptr;
78 using boost::weak_ptr;
79 using boost::dynamic_pointer_cast;
80 using boost::optional;
81 using boost::scoped_ptr;
84 has_video (Content* c)
86 return static_cast<bool>(c->video);
90 has_audio (Content* c)
92 return static_cast<bool>(c->audio);
96 has_subtitle (Content* c)
98 return static_cast<bool>(c->subtitle);
101 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
103 , _playlist (playlist)
104 , _have_valid_pieces (false)
105 , _ignore_video (false)
106 , _ignore_audio (false)
107 , _always_burn_subtitles (false)
109 , _play_referenced (false)
111 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
112 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
113 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
114 set_video_container_size (_film->frame_size ());
116 film_changed (Film::AUDIO_PROCESSOR);
120 Player::setup_pieces ()
122 list<shared_ptr<Piece> > old_pieces = _pieces;
125 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
127 if (!i->paths_valid ()) {
131 shared_ptr<Decoder> decoder;
132 optional<FrameRateChange> frc;
135 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (i);
137 decoder.reset (new FFmpegDecoder (fc, _film->log(), _fast));
138 frc = FrameRateChange (fc->active_video_frame_rate(), _film->video_frame_rate());
141 shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
143 decoder.reset (new DCPDecoder (dc, _film->log(), _fast));
144 frc = FrameRateChange (dc->active_video_frame_rate(), _film->video_frame_rate());
148 shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (i);
150 /* See if we can re-use an old ImageDecoder */
151 for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
152 shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
153 if (imd && imd->content() == ic) {
159 decoder.reset (new ImageDecoder (ic, _film->log()));
162 frc = FrameRateChange (ic->active_video_frame_rate(), _film->video_frame_rate());
165 /* It's questionable whether subtitle content should have a video frame rate; perhaps
166 it should be assumed that any subtitle content has been prepared at the same rate
167 as simultaneous video content (like we do with audio).
170 /* TextSubtitleContent */
171 shared_ptr<const TextSubtitleContent> rc = dynamic_pointer_cast<const TextSubtitleContent> (i);
173 decoder.reset (new TextSubtitleDecoder (rc));
174 frc = FrameRateChange (rc->active_video_frame_rate(), _film->video_frame_rate());
177 /* DCPSubtitleContent */
178 shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (i);
180 decoder.reset (new DCPSubtitleDecoder (dsc));
181 frc = FrameRateChange (dsc->active_video_frame_rate(), _film->video_frame_rate());
184 /* VideoMXFContent */
185 shared_ptr<const VideoMXFContent> vmc = dynamic_pointer_cast<const VideoMXFContent> (i);
187 decoder.reset (new VideoMXFDecoder (vmc, _film->log()));
188 frc = FrameRateChange (vmc->active_video_frame_rate(), _film->video_frame_rate());
191 DCPOMATIC_ASSERT (decoder);
193 if (decoder->video && _ignore_video) {
194 decoder->video->set_ignore ();
197 if (decoder->audio && _ignore_audio) {
198 decoder->audio->set_ignore ();
201 _pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
204 _have_valid_pieces = true;
208 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
210 shared_ptr<Content> c = w.lock ();
216 property == ContentProperty::POSITION ||
217 property == ContentProperty::LENGTH ||
218 property == ContentProperty::TRIM_START ||
219 property == ContentProperty::TRIM_END ||
220 property == ContentProperty::PATH ||
221 property == VideoContentProperty::FRAME_TYPE ||
222 property == DCPContentProperty::CAN_BE_PLAYED ||
223 property == SubtitleContentProperty::COLOUR ||
224 property == SubtitleContentProperty::OUTLINE ||
225 property == SubtitleContentProperty::OUTLINE_COLOUR ||
226 property == FFmpegContentProperty::SUBTITLE_STREAM
229 _have_valid_pieces = false;
233 property == ContentProperty::VIDEO_FRAME_RATE ||
234 property == SubtitleContentProperty::USE ||
235 property == SubtitleContentProperty::X_OFFSET ||
236 property == SubtitleContentProperty::Y_OFFSET ||
237 property == SubtitleContentProperty::X_SCALE ||
238 property == SubtitleContentProperty::Y_SCALE ||
239 property == SubtitleContentProperty::FONTS ||
240 property == VideoContentProperty::CROP ||
241 property == VideoContentProperty::SCALE ||
242 property == VideoContentProperty::FADE_IN ||
243 property == VideoContentProperty::FADE_OUT ||
244 property == VideoContentProperty::COLOUR_CONVERSION
252 Player::set_video_container_size (dcp::Size s)
254 _video_container_size = s;
256 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
257 _black_image->make_black ();
261 Player::playlist_changed ()
263 _have_valid_pieces = false;
268 Player::film_changed (Film::Property p)
270 /* Here we should notice Film properties that affect our output, and
271 alert listeners that our output now would be different to how it was
272 last time we were run.
275 if (p == Film::CONTAINER) {
277 } else if (p == Film::VIDEO_FRAME_RATE) {
278 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
279 so we need new pieces here.
281 _have_valid_pieces = false;
283 } else if (p == Film::AUDIO_PROCESSOR) {
284 if (_film->audio_processor ()) {
285 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
291 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
293 list<PositionImage> all;
295 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
300 /* We will scale the subtitle up to fit _video_container_size */
301 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
303 /* Then we need a corrective translation, consisting of two parts:
305 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
306 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
308 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
309 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
310 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
312 * Combining these two translations gives these expressions.
319 dcp::YUV_TO_RGB_REC601,
320 i->image->pixel_format (),
325 lrint (_video_container_size.width * i->rectangle.x),
326 lrint (_video_container_size.height * i->rectangle.y)
335 shared_ptr<PlayerVideo>
336 Player::black_player_video_frame (DCPTime time) const
338 return shared_ptr<PlayerVideo> (
340 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
344 _video_container_size,
345 _video_container_size,
348 PresetColourConversion::all().front().conversion
353 /** @return All PlayerVideos at the given time. There may be none if the content
354 * at `time' is a DCP which we are passing through (i.e. referring to by reference)
355 * or 2 if we have 3D.
357 list<shared_ptr<PlayerVideo> >
358 Player::get_video (DCPTime time, bool accurate)
360 if (!_have_valid_pieces) {
364 /* Find subtitles for possible burn-in */
366 PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true, accurate);
368 list<PositionImage> sub_images;
370 /* Image subtitles */
371 list<PositionImage> c = transform_image_subtitles (ps.image);
372 copy (c.begin(), c.end(), back_inserter (sub_images));
374 /* Text subtitles (rendered to an image) */
375 if (!ps.text.empty ()) {
376 list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
377 copy (s.begin (), s.end (), back_inserter (sub_images));
380 optional<PositionImage> subtitles;
381 if (!sub_images.empty ()) {
382 subtitles = merge (sub_images);
385 /* Find pieces containing video which is happening now */
387 list<shared_ptr<Piece> > ov = overlaps (
389 time + DCPTime::from_frames (1, _film->video_frame_rate ()),
393 list<shared_ptr<PlayerVideo> > pvf;
396 /* No video content at this time */
397 pvf.push_back (black_player_video_frame (time));
399 /* Some video content at this time */
400 shared_ptr<Piece> last = *(ov.rbegin ());
401 VideoFrameType const last_type = last->content->video->frame_type ();
403 /* Get video from appropriate piece(s) */
404 BOOST_FOREACH (shared_ptr<Piece> piece, ov) {
406 shared_ptr<VideoDecoder> decoder = piece->decoder->video;
407 DCPOMATIC_ASSERT (decoder);
409 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (piece->content);
410 if (dcp_content && dcp_content->reference_video () && !_play_referenced) {
415 /* always use the last video */
417 /* with a corresponding L/R eye if appropriate */
418 (last_type == VIDEO_FRAME_TYPE_3D_LEFT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
419 (last_type == VIDEO_FRAME_TYPE_3D_RIGHT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);
422 /* We want to use this piece */
423 list<ContentVideo> content_video = decoder->get (dcp_to_content_video (piece, time), accurate);
424 if (content_video.empty ()) {
425 pvf.push_back (black_player_video_frame (time));
427 dcp::Size image_size = piece->content->video->scale().size (
428 piece->content->video, _video_container_size, _film->frame_size ()
431 for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
433 shared_ptr<PlayerVideo> (
436 content_video_to_dcp (piece, i->frame),
437 piece->content->video->crop (),
438 piece->content->video->fade (i->frame),
440 _video_container_size,
443 piece->content->video->colour_conversion ()
450 /* Discard unused video */
451 decoder->get (dcp_to_content_video (piece, time), accurate);
457 BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
458 p->set_subtitle (subtitles.get ());
465 /** @return Audio data or 0 if the only audio data here is referenced DCP data */
466 shared_ptr<AudioBuffers>
467 Player::get_audio (DCPTime time, DCPTime length, bool accurate)
469 if (!_have_valid_pieces) {
473 Frame const length_frames = length.frames_round (_film->audio_frame_rate ());
475 shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
476 audio->make_silent ();
478 list<shared_ptr<Piece> > ov = overlaps (time, time + length, has_audio);
483 bool all_referenced = true;
484 BOOST_FOREACH (shared_ptr<Piece> i, ov) {
485 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (i->content);
486 if (i->content->audio && (!dcp_content || !dcp_content->reference_audio ())) {
487 /* There is audio content which is not from a DCP or not set to be referenced */
488 all_referenced = false;
492 if (all_referenced && !_play_referenced) {
493 return shared_ptr<AudioBuffers> ();
496 BOOST_FOREACH (shared_ptr<Piece> i, ov) {
498 DCPOMATIC_ASSERT (i->content->audio);
499 shared_ptr<AudioDecoder> decoder = i->decoder->audio;
500 DCPOMATIC_ASSERT (decoder);
502 /* The time that we should request from the content */
503 DCPTime request = time - DCPTime::from_seconds (i->content->audio->delay() / 1000.0);
504 Frame request_frames = length_frames;
506 if (request < DCPTime ()) {
507 /* We went off the start of the content, so we will need to offset
508 the stuff we get back.
511 request_frames += request.frames_round (_film->audio_frame_rate ());
512 if (request_frames < 0) {
515 request = DCPTime ();
518 Frame const content_frame = dcp_to_resampled_audio (i, request);
520 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams ()) {
522 if (j->channels() == 0) {
523 /* Some content (e.g. DCPs) can have streams with no channels */
527 /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
528 ContentAudio all = decoder->get (j, content_frame, request_frames, accurate);
531 if (i->content->audio->gain() != 0) {
532 shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
533 gain->apply_gain (i->content->audio->gain ());
538 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
539 dcp_mapped->make_silent ();
540 AudioMapping map = j->mapping ();
541 for (int i = 0; i < map.input_channels(); ++i) {
542 for (int j = 0; j < _film->audio_channels(); ++j) {
543 if (map.get (i, j) > 0) {
544 dcp_mapped->accumulate_channel (
554 if (_audio_processor) {
555 dcp_mapped = _audio_processor->run (dcp_mapped, _film->audio_channels ());
558 all.audio = dcp_mapped;
560 audio->accumulate_frames (
562 content_frame - all.frame,
563 offset.frames_round (_film->audio_frame_rate()),
564 min (Frame (all.audio->frames()), request_frames)
573 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
575 DCPTime s = t - piece->content->position ();
576 s = min (piece->content->length_after_trim(), s);
577 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
579 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
580 then convert that ContentTime to frames at the content's rate. However this fails for
581 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
582 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
584 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
586 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
590 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
592 /* See comment in dcp_to_content_video */
593 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
594 return max (DCPTime (), d + piece->content->position ());
598 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
600 DCPTime s = t - piece->content->position ();
601 s = min (piece->content->length_after_trim(), s);
602 /* See notes in dcp_to_content_video */
603 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
607 Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
609 DCPTime s = t - piece->content->position ();
610 s = min (piece->content->length_after_trim(), s);
611 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
615 Player::content_subtitle_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
617 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
620 /** @param burnt true to return only subtitles to be burnt, false to return only
621 * subtitles that should not be burnt. This parameter will be ignored if
622 * _always_burn_subtitles is true; in this case, all subtitles will be returned.
625 Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt, bool accurate)
627 list<shared_ptr<Piece> > subs = overlaps (time, time + length, has_subtitle);
629 PlayerSubtitles ps (time, length);
631 for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
632 if (!(*j)->content->subtitle->use () || (!_always_burn_subtitles && (burnt != (*j)->content->subtitle->burn ()))) {
636 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> ((*j)->content);
637 if (dcp_content && dcp_content->reference_subtitle () && !_play_referenced) {
641 shared_ptr<SubtitleDecoder> subtitle_decoder = (*j)->decoder->subtitle;
642 ContentTime const from = dcp_to_content_subtitle (*j, time);
643 /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
644 ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
646 list<ContentImageSubtitle> image = subtitle_decoder->get_image (ContentTimePeriod (from, to), starting, accurate);
647 for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
649 /* Apply content's subtitle offsets */
650 i->sub.rectangle.x += (*j)->content->subtitle->x_offset ();
651 i->sub.rectangle.y += (*j)->content->subtitle->y_offset ();
653 /* Apply content's subtitle scale */
654 i->sub.rectangle.width *= (*j)->content->subtitle->x_scale ();
655 i->sub.rectangle.height *= (*j)->content->subtitle->y_scale ();
657 /* Apply a corrective translation to keep the subtitle centred after that scale */
658 i->sub.rectangle.x -= i->sub.rectangle.width * ((*j)->content->subtitle->x_scale() - 1);
659 i->sub.rectangle.y -= i->sub.rectangle.height * ((*j)->content->subtitle->y_scale() - 1);
661 ps.image.push_back (i->sub);
664 list<ContentTextSubtitle> text = subtitle_decoder->get_text (ContentTimePeriod (from, to), starting, accurate);
665 BOOST_FOREACH (ContentTextSubtitle& ts, text) {
666 BOOST_FOREACH (dcp::SubtitleString s, ts.subs) {
667 s.set_h_position (s.h_position() + (*j)->content->subtitle->x_offset ());
668 s.set_v_position (s.v_position() + (*j)->content->subtitle->y_offset ());
669 float const xs = (*j)->content->subtitle->x_scale();
670 float const ys = (*j)->content->subtitle->y_scale();
671 float size = s.size();
673 /* Adjust size to express the common part of the scaling;
674 e.g. if xs = ys = 0.5 we scale size by 2.
676 if (xs > 1e-5 && ys > 1e-5) {
677 size *= 1 / min (1 / xs, 1 / ys);
681 /* Then express aspect ratio changes */
682 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
683 s.set_aspect_adjust (xs / ys);
685 s.set_in (dcp::Time(content_subtitle_to_dcp (*j, ts.period().from).seconds(), 1000));
686 s.set_out (dcp::Time(content_subtitle_to_dcp (*j, ts.period().to).seconds(), 1000));
687 ps.text.push_back (s);
688 ps.add_fonts ((*j)->content->subtitle->fonts ());
696 list<shared_ptr<Font> >
697 Player::get_subtitle_fonts ()
699 if (!_have_valid_pieces) {
703 list<shared_ptr<Font> > fonts;
704 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
705 if (p->content->subtitle) {
706 /* XXX: things may go wrong if there are duplicate font IDs
707 with different font files.
709 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
710 copy (f.begin(), f.end(), back_inserter (fonts));
717 /** Set this player never to produce any video data */
719 Player::set_ignore_video ()
721 _ignore_video = true;
724 /** Set this player never to produce any audio data */
726 Player::set_ignore_audio ()
728 _ignore_audio = true;
731 /** Set whether or not this player should always burn text subtitles into the image,
732 * regardless of the content settings.
733 * @param burn true to always burn subtitles, false to obey content settings.
736 Player::set_always_burn_subtitles (bool burn)
738 _always_burn_subtitles = burn;
745 _have_valid_pieces = false;
749 Player::set_play_referenced ()
751 _play_referenced = true;
752 _have_valid_pieces = false;
755 list<ReferencedReelAsset>
756 Player::get_reel_assets ()
758 list<ReferencedReelAsset> a;
760 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
761 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
766 scoped_ptr<DCPDecoder> decoder;
768 decoder.reset (new DCPDecoder (j, _film->log(), false));
774 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
775 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
776 if (j->reference_video ()) {
778 ReferencedReelAsset (
780 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_picture()->duration(), _film->video_frame_rate()))
785 if (j->reference_audio ()) {
787 ReferencedReelAsset (
789 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_sound()->duration(), _film->video_frame_rate()))
794 if (j->reference_subtitle ()) {
795 DCPOMATIC_ASSERT (k->main_subtitle ());
797 ReferencedReelAsset (
799 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_subtitle()->duration(), _film->video_frame_rate()))
804 /* Assume that main picture duration is the length of the reel */
805 offset += k->main_picture()->duration ();
812 list<shared_ptr<Piece> >
813 Player::overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid)
815 if (!_have_valid_pieces) {
819 list<shared_ptr<Piece> > overlaps;
820 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
821 if (valid (i->content.get ()) && i->content->position() < to && i->content->end() > from) {
822 overlaps.push_back (i);