2 Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include "ffmpeg_decoder.h"
23 #include "video_decoder.h"
24 #include "audio_decoder.h"
25 #include "audio_buffers.h"
26 #include "audio_content.h"
27 #include "ffmpeg_content.h"
28 #include "image_decoder.h"
29 #include "content_audio.h"
30 #include "image_content.h"
31 #include "subtitle_content.h"
32 #include "text_subtitle_decoder.h"
33 #include "text_subtitle_content.h"
34 #include "video_mxf_decoder.h"
35 #include "video_mxf_content.h"
36 #include "dcp_content.h"
39 #include "raw_image_proxy.h"
42 #include "render_subtitles.h"
44 #include "content_video.h"
45 #include "player_video.h"
46 #include "frame_rate_change.h"
47 #include "dcp_content.h"
48 #include "dcp_decoder.h"
49 #include "dcp_subtitle_content.h"
50 #include "dcp_subtitle_decoder.h"
51 #include "audio_processor.h"
53 #include "referenced_reel_asset.h"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <boost/foreach.hpp>
/* Convenience macro: compose a message and write it to the film's log as a
   general-type entry.
   NOTE(review): the macro body ends with a ';' — a caller writing
   `LOG_GENERAL(...);` produces an empty extra statement.  Harmless in most
   positions but fragile inside un-braced if/else; confirm against upstream
   before changing. */
65 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
77 using boost::shared_ptr;
78 using boost::weak_ptr;
79 using boost::dynamic_pointer_cast;
80 using boost::optional;
81 using boost::scoped_ptr;
/* Predicate: does this Content carry a video part?  Used as the `valid`
   filter passed to Player::overlaps().
   NOTE(review): this extract is missing lines (embedded numbering jumps
   84 -> 86): the return-type line and braces are not visible here.  Code
   left byte-identical. */
84 has_video (Content* c)
86 return static_cast<bool>(c->video);
/* Predicate: does this Content carry an audio part?  Used as the `valid`
   filter passed to Player::overlaps().
   NOTE(review): extract is missing lines (numbering jumps 90 -> 92); code
   left byte-identical. */
90 has_audio (Content* c)
92 return static_cast<bool>(c->audio);
/* Predicate: does this Content carry a subtitle part?  Used as the `valid`
   filter passed to Player::overlaps().
   NOTE(review): extract is missing lines (numbering jumps 96 -> 98); code
   left byte-identical. */
96 has_subtitle (Content* c)
98 return static_cast<bool>(c->subtitle);
/* Construct a Player over a Film/Playlist pair.
   Wires up change notifications from the film and the playlist so that the
   piece list can be invalidated/rebuilt lazily, sizes the output container
   to the film's frame size, and applies the current audio-processor setting.
   NOTE(review): this extract drops original lines (numbering gaps, e.g.
   101 -> 103: the `_film (film)` initialiser line is missing from view).
   Code left byte-identical. */
101 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
103 , _playlist (playlist)
104 , _have_valid_pieces (false)
105 , _ignore_video (false)
106 , _ignore_audio (false)
107 , _always_burn_subtitles (false)
109 , _play_referenced (false)
111 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
112 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
113 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
114 set_video_container_size (_film->frame_size ());
/* Run the AUDIO_PROCESSOR branch of film_changed once so _audio_processor
   matches the film's current setting from the start. */
116 film_changed (Film::AUDIO_PROCESSOR);
/* Rebuild _pieces from the playlist: for each Content item create the
   matching Decoder and a FrameRateChange mapping its rate to the DCP rate.
   Old ImageDecoders are re-used where possible (presumably to keep their
   cached image — TODO confirm upstream), and the ignore-video/ignore-audio
   flags are pushed down into the new decoders.
   NOTE(review): this extract is missing many original lines (numbering gaps
   such as 127 -> 131, 192 -> 196): braces, `continue`s and the
   dynamic-cast `if` openers are not visible.  Code left byte-identical. */
120 Player::setup_pieces ()
122 list<shared_ptr<Piece> > old_pieces = _pieces;
125 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing (body lines not in view). */
127 if (!i->paths_valid ()) {
131 shared_ptr<Decoder> decoder;
132 optional<FrameRateChange> frc;
/* FFmpeg content */
135 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (i);
137 decoder.reset (new FFmpegDecoder (fc, _film->log(), _fast));
138 frc = FrameRateChange (fc->active_video_frame_rate(), _film->video_frame_rate());
/* DCP content */
141 shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
143 decoder.reset (new DCPDecoder (dc, _film->log(), _fast));
144 frc = FrameRateChange (dc->active_video_frame_rate(), _film->video_frame_rate());
/* Still-image content */
148 shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (i);
150 /* See if we can re-use an old ImageDecoder */
151 for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
152 shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
153 if (imd && imd->content() == ic) {
159 decoder.reset (new ImageDecoder (ic, _film->log()));
162 frc = FrameRateChange (ic->active_video_frame_rate(), _film->video_frame_rate());
165 /* It's questionable whether subtitle content should have a video frame rate; perhaps
166 it should be assumed that any subtitle content has been prepared at the same rate
167 as simultaneous video content (like we do with audio).
170 /* TextSubtitleContent */
171 shared_ptr<const TextSubtitleContent> rc = dynamic_pointer_cast<const TextSubtitleContent> (i);
173 decoder.reset (new TextSubtitleDecoder (rc));
174 frc = FrameRateChange (rc->active_video_frame_rate(), _film->video_frame_rate());
177 /* DCPSubtitleContent */
178 shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (i);
180 decoder.reset (new DCPSubtitleDecoder (dsc));
181 frc = FrameRateChange (dsc->active_video_frame_rate(), _film->video_frame_rate());
184 /* VideoMXFContent */
185 shared_ptr<const VideoMXFContent> vmc = dynamic_pointer_cast<const VideoMXFContent> (i);
187 decoder.reset (new VideoMXFDecoder (vmc, _film->log()));
188 frc = FrameRateChange (vmc->active_video_frame_rate(), _film->video_frame_rate());
192 /* Not something that we can decode; e.g. Atmos content */
/* Honour the player-wide ignore flags on the freshly made decoder. */
196 if (decoder->video && _ignore_video) {
197 decoder->video->set_ignore ();
200 if (decoder->audio && _ignore_audio) {
201 decoder->audio->set_ignore ();
204 _pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
207 _have_valid_pieces = true;
/* React to a property change on a piece of playlist content.
   The first property group changes how pieces must be decoded, so it marks
   the piece list invalid; the second group only affects presentation.
   NOTE(review): this extract is missing the `if (` openers and whatever
   follows each group (presumably a Changed-signal emit — TODO confirm
   upstream); numbering gaps e.g. 213 -> 219, 229 -> 232, 247 -> 255.
   Code left byte-identical. */
211 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
213 shared_ptr<Content> c = w.lock ();
/* Properties that require the pieces (decoders) to be rebuilt. */
219 property == ContentProperty::POSITION ||
220 property == ContentProperty::LENGTH ||
221 property == ContentProperty::TRIM_START ||
222 property == ContentProperty::TRIM_END ||
223 property == ContentProperty::PATH ||
224 property == VideoContentProperty::FRAME_TYPE ||
225 property == DCPContentProperty::CAN_BE_PLAYED ||
226 property == SubtitleContentProperty::COLOUR ||
227 property == SubtitleContentProperty::OUTLINE ||
228 property == SubtitleContentProperty::OUTLINE_COLOUR ||
229 property == FFmpegContentProperty::SUBTITLE_STREAM
232 _have_valid_pieces = false;
/* Properties that change the output without invalidating the pieces. */
236 property == ContentProperty::VIDEO_FRAME_RATE ||
237 property == SubtitleContentProperty::USE ||
238 property == SubtitleContentProperty::X_OFFSET ||
239 property == SubtitleContentProperty::Y_OFFSET ||
240 property == SubtitleContentProperty::X_SCALE ||
241 property == SubtitleContentProperty::Y_SCALE ||
242 property == SubtitleContentProperty::FONTS ||
243 property == VideoContentProperty::CROP ||
244 property == VideoContentProperty::SCALE ||
245 property == VideoContentProperty::FADE_IN ||
246 property == VideoContentProperty::FADE_OUT ||
247 property == VideoContentProperty::COLOUR_CONVERSION
/* Set the size of the video container that frames are composed into, and
   rebuild the cached all-black frame to match.
   NOTE(review): extract is missing lines (numbering gaps 255 -> 257,
   257 -> 259); code left byte-identical. */
255 Player::set_video_container_size (dcp::Size s)
257 _video_container_size = s;
259 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
260 _black_image->make_black ();
/* The playlist itself changed (content added/removed/reordered): the piece
   list must be rebuilt before the next query.
   NOTE(review): extract is missing surrounding lines (numbering gap
   264 -> 266); code left byte-identical. */
264 Player::playlist_changed ()
266 _have_valid_pieces = false;
/* React to a Film property change that would alter our output.
   NOTE(review): extract is missing lines (e.g. the body of the CONTAINER
   branch between 278 and 280, and whatever follows 288); code left
   byte-identical. */
271 Player::film_changed (Film::Property p)
273 /* Here we should notice Film properties that affect our output, and
274 alert listeners that our output now would be different to how it was
275 last time we were run.
278 if (p == Film::CONTAINER) {
280 } else if (p == Film::VIDEO_FRAME_RATE) {
281 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
282 so we need new pieces here.
284 _have_valid_pieces = false;
286 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our output sample rate (or, presumably,
   reset it when the film has none — the else branch is not in view). */
287 if (_film->audio_processor ()) {
288 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position a list of image subtitles so that they fit the
   _video_container_size, returning PositionImages ready for merging.
   NOTE(review): this extract is heavily truncated (numbering gaps such as
   298 -> 303, 315 -> 322): the actual scale call, the push_back into `all`
   and the return statement are not visible.  Code left byte-identical. */
294 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
296 list<PositionImage> all;
298 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
303 /* We will scale the subtitle up to fit _video_container_size */
304 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
306 /* Then we need a corrective translation, consisting of two parts:
308 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
309 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
311 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
312 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
313 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
315 * Combining these two translations gives these expressions.
/* Arguments below presumably belong to an Image::scale / PositionImage
   construction whose surrounding lines are missing — TODO confirm. */
322 dcp::YUV_TO_RGB_REC601,
323 i->image->pixel_format (),
328 lrint (_video_container_size.width * i->rectangle.x),
329 lrint (_video_container_size.height * i->rectangle.y)
/* Build a PlayerVideo that shows the cached black frame, used wherever no
   content video exists at the requested time.
   NOTE(review): the constructor-argument list is truncated in this extract
   (numbering gaps 341 -> 343, 343 -> 347, 348 -> 351); code left
   byte-identical. */
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (DCPTime time) const
341 return shared_ptr<PlayerVideo> (
343 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
347 _video_container_size,
348 _video_container_size,
351 PresetColourConversion::all().front().conversion
356 /** @return All PlayerVideos at the given time. There may be none if the content
357 * at `time' is a DCP which we are passing through (i.e. referring to by reference)
358 * or 2 if we have 3D.
/* NOTE(review): this extract is missing many original lines (numbering
   gaps, e.g. 363 -> 367, 413 -> 418, 446 -> 453): setup_pieces() call,
   several `if`/`else` openers and closing braces, and the final return are
   not visible.  Code left byte-identical. */
360 list<shared_ptr<PlayerVideo> >
361 Player::get_video (DCPTime time, bool accurate)
363 if (!_have_valid_pieces) {
367 /* Find subtitles for possible burn-in */
369 PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true, accurate);
371 list<PositionImage> sub_images;
373 /* Image subtitles */
374 list<PositionImage> c = transform_image_subtitles (ps.image);
375 copy (c.begin(), c.end(), back_inserter (sub_images));
377 /* Text subtitles (rendered to an image) */
378 if (!ps.text.empty ()) {
379 list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
380 copy (s.begin (), s.end (), back_inserter (sub_images));
/* Merge all burn-in subtitle images into one positioned image. */
383 optional<PositionImage> subtitles;
384 if (!sub_images.empty ()) {
385 subtitles = merge (sub_images);
388 /* Find pieces containing video which is happening now */
390 list<shared_ptr<Piece> > ov = overlaps (
392 time + DCPTime::from_frames (1, _film->video_frame_rate ()),
396 list<shared_ptr<PlayerVideo> > pvf;
399 /* No video content at this time */
400 pvf.push_back (black_player_video_frame (time));
402 /* Some video content at this time */
403 shared_ptr<Piece> last = *(ov.rbegin ());
404 VideoFrameType const last_type = last->content->video->frame_type ();
406 /* Get video from appropriate piece(s) */
407 BOOST_FOREACH (shared_ptr<Piece> piece, ov) {
409 shared_ptr<VideoDecoder> decoder = piece->decoder->video;
410 DCPOMATIC_ASSERT (decoder);
/* DCP video that is being passed through by reference is not decoded. */
412 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (piece->content);
413 if (dcp_content && dcp_content->reference_video () && !_play_referenced) {
418 /* always use the last video */
420 /* with a corresponding L/R eye if appropriate */
421 (last_type == VIDEO_FRAME_TYPE_3D_LEFT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
422 (last_type == VIDEO_FRAME_TYPE_3D_RIGHT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);
425 /* We want to use this piece */
426 list<ContentVideo> content_video = decoder->get (dcp_to_content_video (piece, time), accurate);
427 if (content_video.empty ()) {
428 pvf.push_back (black_player_video_frame (time));
430 dcp::Size image_size = piece->content->video->scale().size (
431 piece->content->video, _video_container_size, _film->frame_size ()
434 for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
436 shared_ptr<PlayerVideo> (
439 content_video_to_dcp (piece, i->frame),
440 piece->content->video->crop (),
441 piece->content->video->fade (i->frame),
443 _video_container_size,
446 piece->content->video->colour_conversion ()
453 /* Discard unused video */
454 decoder->get (dcp_to_content_video (piece, time), accurate);
/* Attach the merged burn-in subtitles to every produced frame
   (presumably guarded by a `subtitles` check in a missing line). */
460 BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
461 p->set_subtitle (subtitles.get ());
468 /** @return Audio data or 0 if the only audio data here is referenced DCP data */
/* NOTE(review): this extract is missing many original lines (numbering
   gaps, e.g. 472 -> 476, 536 -> 541, 547 -> 557): setup_pieces() call,
   loop closers, the accumulate_channel arguments, the `offset` declaration
   and the final return are not visible.  Code left byte-identical. */
469 shared_ptr<AudioBuffers>
470 Player::get_audio (DCPTime time, DCPTime length, bool accurate)
472 if (!_have_valid_pieces) {
476 Frame const length_frames = length.frames_round (_film->audio_frame_rate ());
/* Output buffer starts silent; content audio is accumulated into it. */
478 shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
479 audio->make_silent ();
481 list<shared_ptr<Piece> > ov = overlaps (time, time + length, has_audio);
/* If every overlapping piece is referenced DCP audio, return nothing. */
486 bool all_referenced = true;
487 BOOST_FOREACH (shared_ptr<Piece> i, ov) {
488 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (i->content);
489 if (i->content->audio && (!dcp_content || !dcp_content->reference_audio ())) {
490 /* There is audio content which is not from a DCP or not set to be referenced */
491 all_referenced = false;
495 if (all_referenced && !_play_referenced) {
496 return shared_ptr<AudioBuffers> ();
499 BOOST_FOREACH (shared_ptr<Piece> i, ov) {
501 DCPOMATIC_ASSERT (i->content->audio);
502 shared_ptr<AudioDecoder> decoder = i->decoder->audio;
503 DCPOMATIC_ASSERT (decoder);
505 /* The time that we should request from the content */
506 DCPTime request = time - DCPTime::from_seconds (i->content->audio->delay() / 1000.0);
507 Frame request_frames = length_frames;
509 if (request < DCPTime ()) {
510 /* We went off the start of the content, so we will need to offset
511 the stuff we get back.
514 request_frames += request.frames_round (_film->audio_frame_rate ());
515 if (request_frames < 0) {
518 request = DCPTime ();
521 Frame const content_frame = dcp_to_resampled_audio (i, request);
523 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams ()) {
525 if (j->channels() == 0) {
526 /* Some content (e.g. DCPs) can have streams with no channels */
530 /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
531 ContentAudio all = decoder->get (j, content_frame, request_frames, accurate);
/* Apply per-content gain (in a copy, so the decoder's data is untouched). */
534 if (i->content->audio->gain() != 0) {
535 shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
536 gain->apply_gain (i->content->audio->gain ());
/* Remap the stream's channels into the film's DCP channel layout. */
541 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
542 dcp_mapped->make_silent ();
543 AudioMapping map = j->mapping ();
544 for (int i = 0; i < map.input_channels(); ++i) {
545 for (int j = 0; j < _film->audio_channels(); ++j) {
546 if (map.get (i, j) > 0) {
547 dcp_mapped->accumulate_channel (
557 if (_audio_processor) {
558 dcp_mapped = _audio_processor->run (dcp_mapped, _film->audio_channels ());
561 all.audio = dcp_mapped;
563 audio->accumulate_frames (
565 content_frame - all.frame,
566 offset.frames_round (_film->audio_frame_rate()),
567 min (Frame (all.audio->frames()), request_frames)
/* Convert a DCP timeline time to a frame index in the piece's content,
   clamping to the trimmed length and accounting for skip/repeat via the
   piece's FrameRateChange factor.
   NOTE(review): extract is missing lines (gaps 576 -> 578, 585 -> 587);
   the return-type line and braces are not in view.  Code left
   byte-identical. */
576 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
578 DCPTime s = t - piece->content->position ();
579 s = min (piece->content->length_after_trim(), s);
580 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
582 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
583 then convert that ContentTime to frames at the content's rate. However this fails for
584 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
585 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
587 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
589 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: map a content frame index back onto the
   DCP timeline, clamped to be non-negative.
   NOTE(review): extract is missing lines (gap 593 -> 595); return type and
   braces not in view.  Code left byte-identical. */
593 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
595 /* See comment in dcp_to_content_video */
596 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
597 return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP timeline time to an audio frame index at the film's audio
   sample rate, clamped into the piece's trimmed extent.
   NOTE(review): extract is missing lines (gap 601 -> 603); code left
   byte-identical. */
601 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
603 DCPTime s = t - piece->content->position ();
604 s = min (piece->content->length_after_trim(), s);
605 /* See notes in dcp_to_content_video */
606 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Convert a DCP timeline time to a ContentTime within the piece, for
   subtitle lookup; clamped to be non-negative.
   NOTE(review): extract is missing lines (gap 610 -> 612); code left
   byte-identical. */
610 Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
612 DCPTime s = t - piece->content->position ();
613 s = min (piece->content->length_after_trim(), s);
614 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_subtitle: map a ContentTime back onto the DCP
   timeline, clamped to be non-negative.
   NOTE(review): extract is missing lines (gap 618 -> 620); code left
   byte-identical. */
618 Player::content_subtitle_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
620 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
623 /** @param burnt true to return only subtitles to be burnt, false to return only
624 * subtitles that should not be burnt. This parameter will be ignored if
625 * _always_burn_subtitles is true; in this case, all subtitles will be returned.
/* NOTE(review): this extract is missing many original lines (numbering
   gaps, e.g. 635 -> 639, 691 -> 699): `continue`s, closing braces and the
   final `return ps;` are not visible.  Code left byte-identical. */
628 Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt, bool accurate)
630 list<shared_ptr<Piece> > subs = overlaps (time, time + length, has_subtitle);
632 PlayerSubtitles ps (time, length);
634 for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
/* Skip subtitles that are disabled, or whose burn setting does not match
   the request (unless we always burn). */
635 if (!(*j)->content->subtitle->use () || (!_always_burn_subtitles && (burnt != (*j)->content->subtitle->burn ()))) {
/* Referenced DCP subtitles are passed through, not rendered here. */
639 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> ((*j)->content);
640 if (dcp_content && dcp_content->reference_subtitle () && !_play_referenced) {
644 shared_ptr<SubtitleDecoder> subtitle_decoder = (*j)->decoder->subtitle;
645 ContentTime const from = dcp_to_content_subtitle (*j, time);
646 /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
647 ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
649 list<ContentImageSubtitle> image = subtitle_decoder->get_image (ContentTimePeriod (from, to), starting, accurate);
650 for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
652 /* Apply content's subtitle offsets */
653 i->sub.rectangle.x += (*j)->content->subtitle->x_offset ();
654 i->sub.rectangle.y += (*j)->content->subtitle->y_offset ();
656 /* Apply content's subtitle scale */
657 i->sub.rectangle.width *= (*j)->content->subtitle->x_scale ();
658 i->sub.rectangle.height *= (*j)->content->subtitle->y_scale ();
660 /* Apply a corrective translation to keep the subtitle centred after that scale */
661 i->sub.rectangle.x -= i->sub.rectangle.width * ((*j)->content->subtitle->x_scale() - 1);
662 i->sub.rectangle.y -= i->sub.rectangle.height * ((*j)->content->subtitle->y_scale() - 1);
664 ps.image.push_back (i->sub);
667 list<ContentTextSubtitle> text = subtitle_decoder->get_text (ContentTimePeriod (from, to), starting, accurate);
668 BOOST_FOREACH (ContentTextSubtitle& ts, text) {
669 BOOST_FOREACH (dcp::SubtitleString s, ts.subs) {
670 s.set_h_position (s.h_position() + (*j)->content->subtitle->x_offset ());
671 s.set_v_position (s.v_position() + (*j)->content->subtitle->y_offset ());
672 float const xs = (*j)->content->subtitle->x_scale();
673 float const ys = (*j)->content->subtitle->y_scale();
674 float size = s.size();
676 /* Adjust size to express the common part of the scaling;
677 e.g. if xs = ys = 0.5 we scale size by 2.
679 if (xs > 1e-5 && ys > 1e-5) {
680 size *= 1 / min (1 / xs, 1 / ys);
684 /* Then express aspect ratio changes */
685 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
686 s.set_aspect_adjust (xs / ys);
/* Subtitle in/out times converted back onto the DCP timeline. */
688 s.set_in (dcp::Time(content_subtitle_to_dcp (*j, ts.period().from).seconds(), 1000));
689 s.set_out (dcp::Time(content_subtitle_to_dcp (*j, ts.period().to).seconds(), 1000));
690 ps.text.push_back (s);
691 ps.add_fonts ((*j)->content->subtitle->fonts ());
/* Collect the fonts declared by every piece of subtitle content.
   NOTE(review): extract is missing lines (gaps 702 -> 706, after 713):
   setup_pieces() call, loop closers and the final return are not in view.
   Code left byte-identical. */
699 list<shared_ptr<Font> >
700 Player::get_subtitle_fonts ()
702 if (!_have_valid_pieces) {
706 list<shared_ptr<Font> > fonts;
707 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
708 if (p->content->subtitle) {
709 /* XXX: things may go wrong if there are duplicate font IDs
710 with different font files.
712 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
713 copy (f.begin(), f.end(), back_inserter (fonts));
720 /** Set this player never to produce any video data */
/* NOTE(review): return type/braces missing from this extract (gap
   720 -> 722); code left byte-identical. */
722 Player::set_ignore_video ()
724 _ignore_video = true;
727 /** Set this player never to produce any audio data */
/* NOTE(review): return type/braces missing from this extract (gap
   727 -> 729); code left byte-identical. */
729 Player::set_ignore_audio ()
731 _ignore_audio = true;
734 /** Set whether or not this player should always burn text subtitles into the image,
735 * regardless of the content settings.
736 * @param burn true to always burn subtitles, false to obey content settings.
739 Player::set_always_burn_subtitles (bool burn)
741 _always_burn_subtitles = burn;
/* NOTE(review): the embedded numbering jumps 741 -> 748, so the line below
   almost certainly belongs to a DIFFERENT function whose header was lost
   from this extract (presumably a fast-mode setter) — confirm against
   upstream.  Code left byte-identical. */
748 _have_valid_pieces = false;
/* Make this player actually play content that would otherwise be passed
   through by reference (referenced DCP video/audio/subtitles); the piece
   list is invalidated so decoders get rebuilt.
   NOTE(review): return type/braces missing from this extract (gap
   752 -> 754); code left byte-identical. */
752 Player::set_play_referenced ()
754 _play_referenced = true;
755 _have_valid_pieces = false;
/* Build the list of reel assets (picture/sound/subtitle) that are being
   referenced from DCP content rather than re-encoded, with each asset's
   period placed on the DCP timeline.
   NOTE(review): this extract is missing many original lines (numbering
   gaps, e.g. 764 -> 769, 783 -> 788): `continue`s, the `offset`
   declaration, the a.push_back openers/asset arguments and the final
   return are not visible.  Code left byte-identical. */
758 list<ReferencedReelAsset>
759 Player::get_reel_assets ()
761 list<ReferencedReelAsset> a;
763 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
764 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
/* A throw-away decoder is made just to enumerate the DCP's reels. */
769 scoped_ptr<DCPDecoder> decoder;
771 decoder.reset (new DCPDecoder (j, _film->log(), false));
777 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
778 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
779 if (j->reference_video ()) {
781 ReferencedReelAsset (
783 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_picture()->duration(), _film->video_frame_rate()))
788 if (j->reference_audio ()) {
790 ReferencedReelAsset (
792 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_sound()->duration(), _film->video_frame_rate()))
797 if (j->reference_subtitle ()) {
798 DCPOMATIC_ASSERT (k->main_subtitle ());
800 ReferencedReelAsset (
802 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_subtitle()->duration(), _film->video_frame_rate()))
807 /* Assume that main picture duration is the length of the reel */
808 offset += k->main_picture()->duration ();
815 list<shared_ptr<Piece> >
816 Player::overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid)
818 if (!_have_valid_pieces) {
822 list<shared_ptr<Piece> > overlaps;
823 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
824 if (valid (i->content.get ()) && i->content->position() < to && i->content->end() > from) {
825 overlaps.push_back (i);