[ VIGRA Homepage | Function Index | Class Index | Namespaces | File List | Main Page ]

multi_handle.hxx
1/************************************************************************/
2/* */
3/* Copyright 2011-2014 by Ullrich Koethe */
4/* */
5/* This file is part of the VIGRA computer vision library. */
6/* The VIGRA Website is */
7/* http://hci.iwr.uni-heidelberg.de/vigra/ */
8/* Please direct questions, bug reports, and contributions to */
9/* ullrich.koethe@iwr.uni-heidelberg.de or */
10/* vigra@informatik.uni-hamburg.de */
11/* */
12/* Permission is hereby granted, free of charge, to any person */
13/* obtaining a copy of this software and associated documentation */
14/* files (the "Software"), to deal in the Software without */
15/* restriction, including without limitation the rights to use, */
16/* copy, modify, merge, publish, distribute, sublicense, and/or */
17/* sell copies of the Software, and to permit persons to whom the */
18/* Software is furnished to do so, subject to the following */
19/* conditions: */
20/* */
21/* The above copyright notice and this permission notice shall be */
22/* included in all copies or substantial portions of the */
23/* Software. */
24/* */
25/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
26/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
27/* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
28/* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
29/* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
30/* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
31/* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
32/* OTHER DEALINGS IN THE SOFTWARE. */
33/* */
34/************************************************************************/
35
36#ifndef MULTI_HANDLE_HXX
37#define MULTI_HANDLE_HXX
38
39#include "multi_fwd.hxx"
40#include "metaprogramming.hxx"
41#include "multi_shape.hxx"
42
43namespace vigra {
44
// Primary template for the compile-time "cast a handle chain down to the
// member with index TARGET_INDEX" machinery; defined near the end of this
// file. INDEX defaults to the chain length of the given Handle.
template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX=Handle::index>
struct CoupledHandleCast;

#ifndef _MSC_VER // Visual Studio doesn't like these forward declarations
// Forward declarations of the free functions get<TARGET_INDEX>() so that
// the CoupledHandle::get() member functions below can call them.
template <unsigned int TARGET_INDEX, class Handle>
typename CoupledHandleCast<TARGET_INDEX, Handle>::reference
get(Handle & handle);

template <unsigned int TARGET_INDEX, class Handle>
typename CoupledHandleCast<TARGET_INDEX, Handle>::const_reference
get(Handle const & handle);
#endif
57
58/** \addtogroup MultiIteratorGroup
59*/
60//@{
61
 /**
    Handle class, used by CoupledScanOrderIterator as the value type to simultaneously iterate over multiple images.
 */
65template <class T, class NEXT>
67: public NEXT
68{
69public:
70 typedef NEXT base_type;
72
73 static const int index = NEXT::index + 1; // index of this member of the chain
74 static const unsigned int dimensions = NEXT::dimensions;
75
76 typedef T value_type;
77 typedef T * pointer;
78 typedef T const * const_pointer;
79 typedef T & reference;
80 typedef T const & const_reference;
81 typedef typename base_type::shape_type shape_type;
82
84 : base_type(),
85 pointer_(),
86 strides_()
87 {}
88
89 template <class NEXT1>
90 CoupledHandle(CoupledHandle<T, NEXT1> const & h, NEXT const & next)
91 : base_type(next),
92 pointer_(h.pointer_),
93 strides_(h.strides_)
94 {}
95
96 CoupledHandle(const_pointer p, shape_type const & strides, NEXT const & next)
97 : base_type(next),
98 pointer_(const_cast<pointer>(p)),
99 strides_(strides)
100 {}
101
102 template <class Stride>
103 CoupledHandle(MultiArrayView<dimensions, T, Stride> const & v, NEXT const & next)
104 : base_type(next),
105 pointer_(const_cast<pointer>(v.data())),
106 strides_(v.stride())
107 {
108 vigra_precondition(v.shape() == this->shape(), "createCoupledIterator(): shape mismatch.");
109 }
110
111 inline void incDim(int dim)
112 {
113 pointer_ += strides_[dim];
114 base_type::incDim(dim);
115 }
116
117 inline void decDim(int dim)
118 {
119 pointer_ -= strides_[dim];
120 base_type::decDim(dim);
121 }
122
123 inline void addDim(int dim, MultiArrayIndex d)
124 {
125 pointer_ += d*strides_[dim];
126 base_type::addDim(dim, d);
127 }
128
129 inline void add(shape_type const & d)
130 {
131 pointer_ += dot(d, strides_);
132 base_type::add(d);
133 }
134
135 template<int DIMENSION>
136 inline void increment()
137 {
138 pointer_ += strides_[DIMENSION];
139 base_type::template increment<DIMENSION>();
140 }
141
142 template<int DIMENSION>
143 inline void decrement()
144 {
145 pointer_ -= strides_[DIMENSION];
146 base_type::template decrement<DIMENSION>();
147 }
148
149 // TODO: test if making the above a default case of the this hurts performance
150 template<int DIMENSION>
151 inline void increment(MultiArrayIndex offset)
152 {
153 pointer_ += offset*strides_[DIMENSION];
154 base_type::template increment<DIMENSION>(offset);
155 }
156
157 template<int DIMENSION>
158 inline void decrement(MultiArrayIndex offset)
159 {
160 pointer_ -= offset*strides_[DIMENSION];
161 base_type::template decrement<DIMENSION>(offset);
162 }
163
164 void restrictToSubarray(shape_type const & start, shape_type const & end)
165 {
166 pointer_ += dot(start, strides_);
167 base_type::restrictToSubarray(start, end);
168 }
169
170 // ptr access
171 reference operator*()
172 {
173 return *pointer_;
174 }
175
176 const_reference operator*() const
177 {
178 return *pointer_;
179 }
180
181 pointer operator->()
182 {
183 return pointer_;
184 }
185
186 const_pointer operator->() const
187 {
188 return pointer_;
189 }
190
191 pointer ptr()
192 {
193 return pointer_;
194 }
195
196 const_pointer ptr() const
197 {
198 return pointer_;
199 }
200
201 shape_type const & strides() const
202 {
203 return strides_;
204 }
205
207 arrayView() const
208 {
209 return MultiArrayView<dimensions, T>(this->shape(), strides(), ptr() - dot(this->point(), strides()));
210 }
211
212 template <unsigned int TARGET_INDEX>
213 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
214 get()
215 {
216 return vigra::get<TARGET_INDEX>(*this);
217 }
218
219 template <unsigned int TARGET_INDEX>
220 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
221 get() const
222 {
223 return vigra::get<TARGET_INDEX>(*this);
224 }
225
226 // NOTE: dangerous function - only use it when you know what you are doing
227 void internal_reset(const_pointer p)
228 {
229 pointer_ = const_cast<pointer>(p);
230 }
231
232 pointer pointer_;
233 shape_type strides_;
234};
235
236 // CoupledHandle holding the current coordinate
237 // (always the end of a CoupledHandle chain)
238template <int N>
240{
241public:
242 static const unsigned int index = 0; // index of this member of the chain
243 static const unsigned int dimensions = N;
244
245 typedef typename MultiArrayShape<N>::type value_type;
246 typedef value_type const * pointer;
247 typedef value_type const * const_pointer;
248 typedef value_type const & reference;
249 typedef value_type const & const_reference;
250 typedef value_type shape_type;
251 typedef CoupledHandle<value_type, void> self_type;
252
254 : point_(),
255 shape_(),
256 scanOrderIndex_()
257 {}
258
259 CoupledHandle(value_type const & shape)
260 : point_(),
261 shape_(shape),
262 scanOrderIndex_()
263 {}
264
265 CoupledHandle(typename MultiArrayShape<N+1>::type const & shape)
266 : point_(),
267 shape_(shape.begin()),
268 scanOrderIndex_()
269 {}
270
271 inline void incDim(int dim)
272 {
273 ++point_[dim];
274 }
275
276 inline void decDim(int dim)
277 {
278 --point_[dim];
279 }
280
281 inline void addDim(int dim, MultiArrayIndex d)
282 {
283 point_[dim] += d;
284 }
285
286 inline void add(shape_type const & d)
287 {
288 point_ += d;
289 }
290
291 template<int DIMENSION>
292 inline void increment()
293 {
294 ++point_[DIMENSION];
295 }
296
297 template<int DIMENSION>
298 inline void decrement()
299 {
300 --point_[DIMENSION];
301 }
302
303 // TODO: test if making the above a default case of the this hurts performance
304 template<int DIMENSION>
305 inline void increment(MultiArrayIndex offset)
306 {
307 point_[DIMENSION] += offset;
308 }
309
310 template<int DIMENSION>
311 inline void decrement(MultiArrayIndex offset)
312 {
313 point_[DIMENSION] -= offset;
314 }
315
316 void restrictToSubarray(shape_type const & start, shape_type const & end)
317 {
318 point_ = shape_type();
319 shape_ = end - start;
320 scanOrderIndex_ = 0;
321 }
322
323 inline void incrementIndex()
324 {
325 ++scanOrderIndex_;
326 }
327
328 inline void decrementIndex()
329 {
330 --scanOrderIndex_;
331 }
332
333 inline void incrementIndex(MultiArrayIndex offset)
334 {
335 scanOrderIndex_ += offset;
336 }
337
338 inline void decrementIndex(MultiArrayIndex offset)
339 {
340 scanOrderIndex_ -= offset;
341 }
342
343 // access
344 MultiArrayIndex scanOrderIndex() const
345 {
346 return scanOrderIndex_;
347 }
348
349 // access
350 const_reference point() const
351 {
352 return point_;
353 }
354
355 // access
356 const_reference shape() const
357 {
358 return shape_;
359 }
360
361 const_reference operator*() const
362 {
363 return point_;
364 }
365
366 const_pointer operator->() const
367 {
368 return &point_;
369 }
370
371 const_pointer ptr() const
372 {
373 return &point_;
374 }
375
376 unsigned int borderType() const
377 {
378 return detail::BorderTypeImpl<N>::exec(point_, shape_);
379 }
380
381 template <unsigned int TARGET_INDEX>
382 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
383 get()
384 {
385 return vigra::get<TARGET_INDEX>(*this);
386 }
387
388 template <unsigned int TARGET_INDEX>
389 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
390 get() const
391 {
392 return vigra::get<TARGET_INDEX>(*this);
393 }
394
395 // NOTE: dangerous function - only use it when you know what you are doing
396 void internal_reset(value_type const & point)
397 {
398 point_ = point;
399 }
400
401 value_type point_, shape_;
402 MultiArrayIndex scanOrderIndex_;
403};
404
405 // CoupledHandle for multi-band data
// CoupledHandle for multi-band data: the "element" at each point is a 1D
// strided view over all channels rather than a single scalar.
template <class T, class NEXT>
class CoupledHandle<Multiband<T>, NEXT>
: public NEXT
{
public:
    typedef NEXT base_type;
    typedef CoupledHandle<Multiband<T>, NEXT> self_type;

    static const unsigned int index = NEXT::index + 1; // index of this member of the chain
    static const unsigned int dimensions = NEXT::dimensions;

    // value_type is a channel vector (1D view), not a scalar.
    typedef MultiArrayView<1, T, StridedArrayTag> value_type;
    typedef value_type * pointer;
    typedef value_type const * const_pointer;
    typedef value_type & reference;
    typedef value_type const & const_reference;
    typedef typename base_type::shape_type shape_type;

    CoupledHandle()
    : base_type(),
      view_(),
      strides_()
    {}

    // Rebind an existing handle onto a different tail 'next' of the chain.
    template <class NEXT1>
    CoupledHandle(CoupledHandle<Multiband<T>, NEXT1> const & h, NEXT const & next)
    : base_type(next),
      view_(h.view_),
      strides_(h.strides_)
    {}

    CoupledHandle(const_reference p, shape_type const & strides, NEXT const & next)
    : base_type(next),
      view_(p),
      strides_(strides)
    {}

    // Construct from a (dimensions+1)-D multiband view: the inner view is the
    // channel vector at the origin, the strides are those of the spatial axes.
    template <class Stride>
    CoupledHandle(MultiArrayView<dimensions+1, Multiband<T>, Stride> const & v, NEXT const & next)
    : base_type(next),
      view_(v.bindInner(shape_type())),
      strides_(v.bindOuter(0).stride())
    {
        vigra_precondition(v.bindOuter(0).shape() == this->shape(), "createCoupledIterator(): shape mismatch.");
    }

    // Navigation: shift the channel view's data pointer, then forward the
    // same motion down the chain.
    inline void incDim(int dim)
    {
        view_.unsafePtr() += strides_[dim];
        base_type::incDim(dim);
    }

    inline void decDim(int dim)
    {
        view_.unsafePtr() -= strides_[dim];
        base_type::decDim(dim);
    }

    inline void addDim(int dim, MultiArrayIndex d)
    {
        view_.unsafePtr() += d*strides_[dim];
        base_type::addDim(dim, d);
    }

    inline void add(shape_type const & d)
    {
        view_.unsafePtr() += dot(d, strides_);
        base_type::add(d);
    }

    template<int DIMENSION>
    inline void increment()
    {
        view_.unsafePtr() += strides_[DIMENSION];
        base_type::template increment<DIMENSION>();
    }

    template<int DIMENSION>
    inline void decrement()
    {
        view_.unsafePtr() -= strides_[DIMENSION];
        base_type::template decrement<DIMENSION>();
    }

    // TODO: test whether making the above the default case of this hurts performance
    template<int DIMENSION>
    inline void increment(MultiArrayIndex offset)
    {
        view_.unsafePtr() += offset*strides_[DIMENSION];
        base_type::template increment<DIMENSION>(offset);
    }

    template<int DIMENSION>
    inline void decrement(MultiArrayIndex offset)
    {
        view_.unsafePtr() -= offset*strides_[DIMENSION];
        base_type::template decrement<DIMENSION>(offset);
    }

    // Shift the handle's origin to 'start' of the requested subarray.
    void restrictToSubarray(shape_type const & start, shape_type const & end)
    {
        view_.unsafePtr() += dot(start, strides_);
        base_type::restrictToSubarray(start, end);
    }

    // ptr access: dereferencing yields the channel vector at the current point
    reference operator*()
    {
        return view_;
    }

    const_reference operator*() const
    {
        return view_;
    }

    pointer operator->()
    {
        return &view_;
    }

    const_pointer operator->() const
    {
        return &view_;
    }

    pointer ptr()
    {
        return &view_;
    }

    const_pointer ptr() const
    {
        return &view_;
    }

    shape_type const & strides() const
    {
        return strides_;
    }

    // Reconstruct the full (dimensions+1)-D multiband view: spatial axes
    // first, the channel axis appended last.
    MultiArrayView<dimensions+1, Multiband<T> >
    arrayView() const
    {
        typedef MultiArrayView<dimensions+1, T> View;
        typename View::difference_type vshape(SkipInitialization), vstride(SkipInitialization);
        vshape.template subarray<0, dimensions>() = this->shape();
        vstride.template subarray<0, dimensions>() = strides();
        vshape[dimensions] = view_.shape(0);
        vstride[dimensions] = view_.stride(0);
        return View(vshape, vstride, view_.data() - dot(this->point(), strides())).multiband();
    }

    template <unsigned int TARGET_INDEX>
    typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
    get()
    {
        return vigra::get<TARGET_INDEX>(*this);
    }

    template <unsigned int TARGET_INDEX>
    typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
    get() const
    {
        return vigra::get<TARGET_INDEX>(*this);
    }

    // Resetting a multiband handle is not supported.
    template <class U>
    void internal_reset(U const &)
    {
        vigra_fail("CoupledHandle<Multiband<T>>::internal_reset(): not implemented.");
    }

    value_type view_;
    shape_type strides_;
};
582
 // helper class for CoupledHandle for ChunkedArray
// Bookkeeping for a chunked-array handle: the offset of the iterated region
// inside the full array plus a pointer to the currently referenced chunk.
template <unsigned int N, class T>
class IteratorChunkHandle
{
  public:
    typedef ChunkedArray<N, T> array_type;
    typedef typename MultiArrayShape<N>::type shape_type;

    IteratorChunkHandle()
    : offset_(),
      chunk_(0)
    {}

    IteratorChunkHandle(shape_type const & offset)
    : offset_(offset),
      chunk_(0)
    {}

    // Copies deliberately start without a chunk reference (chunk_ = 0);
    // presumably each handle must acquire its own reference via
    // chunkForIterator() — confirm against ChunkedArray's ref-counting.
    IteratorChunkHandle(IteratorChunkHandle const & other)
    : offset_(other.offset_),
      chunk_(0)
    {}

    IteratorChunkHandle & operator=(IteratorChunkHandle const & other)
    {
        offset_ = other.offset_;
        chunk_ = 0;       // same rule as the copy constructor: no shared chunk reference
        return *this;
    }

    shape_type offset_;                 // origin of the iterated region in the full array
    SharedChunkHandle<N, T> * chunk_;   // currently referenced chunk (may be 0)
};
616
 /* CoupledHandle for ChunkedArray

    The handle must store a pointer to a chunk because the chunk knows
    about memory management, and to an array view because it knows about
    subarrays and slices.

    Perhaps we can reduce this to a single pointer or otherwise reduce
    the handle memory to make it faster?
 */
template <class U, class NEXT>
class CoupledHandle<ChunkedMemory<U>, NEXT>
: public NEXT,
  public IteratorChunkHandle<NEXT::dimensions, typename UnqualifiedType<U>::type>
{
public:
    typedef typename UnqualifiedType<U>::type T;
    typedef NEXT base_type;
    typedef IteratorChunkHandle<NEXT::dimensions, T> base_type2;
    typedef CoupledHandle<ChunkedMemory<U>, NEXT> self_type;

    static const unsigned int index = NEXT::index + 1; // index of this member of the chain
    static const unsigned int dimensions = NEXT::dimensions;

    // array_type is const-qualified when U is, so a const array yields a
    // read-only handle.
    typedef typename IfBool<UnqualifiedType<U>::isConst,
                            ChunkedArrayBase<dimensions, T> const,
                            ChunkedArrayBase<dimensions, T> >::type array_type;
    typedef detail::ChunkShape<dimensions, T> chunk_shape;
    typedef T value_type;
    typedef U * pointer;
    typedef value_type const * const_pointer;
    typedef U & reference;
    typedef value_type const & const_reference;
    typedef typename base_type::shape_type shape_type;

    CoupledHandle()
    : base_type(),
      base_type2(),
      pointer_(),
      strides_(),
      upper_bound_(),
      array_()
    {}

    // A copy must acquire its own chunk reference; chunkForIterator() also
    // refreshes pointer_, strides_ and upper_bound_ for the current point.
    CoupledHandle(CoupledHandle const & other)
    : base_type(other),
      base_type2(other),
      pointer_(other.pointer_),
      strides_(other.strides_),
      upper_bound_(other.upper_bound_),
      array_(other.array_)
    {
        if(array_)
            pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
    }

    // strides_ and upper_bound_ are left default-constructed here; they are
    // filled in by chunkForIterator() below.
    CoupledHandle(array_type const & array, NEXT const & next)
    : base_type(next),
      base_type2(),
      pointer_(),
      array_(const_cast<array_type*>(&array))
    {
        if(array_)
            pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
    }

    ~CoupledHandle()
    {
        // deref the present chunk
        if(array_)
            array_->unrefChunk(this);
    }

    CoupledHandle & operator=(CoupledHandle const & other)
    {
        if(this != &other)
        {
            // deref the present chunk
            if(array_)
                array_->unrefChunk(this);
            base_type::operator=(other);
            base_type2::operator=(other);
            array_ = other.array_;
            if(array_)
            {
                // re-acquire a chunk reference for the new current point
                pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
            }
            else
            {
                pointer_ = other.pointer_;
                strides_ = other.strides_;
                upper_bound_ = other.upper_bound_;
            }
        }
        return *this;
    }

    using base_type::point;
    using base_type::shape;

    // Navigation: advance the base chain first, then move the raw pointer;
    // when the point leaves the current chunk, switch to the next chunk.
    inline void incDim(int dim)
    {
        base_type::incDim(dim);
        pointer_ += strides_[dim];
        if(point()[dim] == upper_bound_[dim])
        {
            // if(point()[dim] < shape()[dim])
                pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
        }
    }

    inline void decDim(int dim)
    {
        base_type::decDim(dim);
        pointer_ -= strides_[dim];
        if(point()[dim] < upper_bound_[dim] - array_->chunk_shape_[dim])
        {
            // if(point()[dim] >= 0)
                pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
        }
    }

    // Arbitrary jumps always re-resolve the chunk (when inside the array).
    inline void addDim(int dim, MultiArrayIndex d)
    {
        base_type::addDim(dim, d);
        if(point()[dim] < shape()[dim] && point()[dim] >= 0)
            pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
    }

    inline void add(shape_type const & d)
    {
        base_type::add(d);
        pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
    }

    template<int DIMENSION>
    inline void increment()
    {
        // incDim(DIMENSION);
        base_type::template increment<DIMENSION>();
        pointer_ += strides_[DIMENSION];
        if(point()[DIMENSION] == upper_bound_[DIMENSION])
        {
            if(point()[DIMENSION] > shape()[DIMENSION])
                // this invariant check prevents the compiler from optimizing stupidly
                // (it makes a difference of a factor of 2!)
                vigra_invariant(false, "CoupledHandle<ChunkedMemory<T>>: internal error.");
            else
                pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
        }
    }

    template<int DIMENSION>
    inline void decrement()
    {
        // decDim(DIMENSION);
        base_type::template decrement<DIMENSION>();
        pointer_ -= strides_[DIMENSION];
        if(point()[DIMENSION] < upper_bound_[DIMENSION] - array_->chunk_shape_[DIMENSION])
        {
            if(point()[DIMENSION] < -1)
                // this invariant check prevents the compiler from optimizing stupidly
                // (it makes a difference of a factor of 2!)
                vigra_invariant(false, "CoupledHandle<ChunkedMemory<T>>: internal error.");
            else
                pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
        }
    }

    template<int DIMENSION>
    inline void increment(MultiArrayIndex d)
    {
        addDim(DIMENSION, d);
    }

    template<int DIMENSION>
    inline void decrement(MultiArrayIndex d)
    {
        addDim(DIMENSION, -d);
    }

    void restrictToSubarray(shape_type const & start, shape_type const & end)
    {
        base_type::restrictToSubarray(start, end);
        this->offset_ += start;
        pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
    }

    // ptr access
    reference operator*()
    {
        return *pointer_;
    }

    const_reference operator*() const
    {
        return *pointer_;
    }

    pointer operator->()
    {
        return pointer_;
    }

    const_pointer operator->() const
    {
        return pointer_;
    }

    pointer ptr()
    {
        return pointer_;
    }

    const_pointer ptr() const
    {
        return pointer_;
    }

    // Unlike the other handles, this returns the chunked array itself.
    array_type const &
    arrayView() const
    {
        return *array_;
    }

    template <unsigned int TARGET_INDEX>
    typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
    get()
    {
        return vigra::get<TARGET_INDEX>(*this);
    }

    template <unsigned int TARGET_INDEX>
    typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
    get() const
    {
        return vigra::get<TARGET_INDEX>(*this);
    }

    // Resetting a chunked handle is not supported.
    template <class V>
    void internal_reset(V const &)
    {
        vigra_fail("CoupledHandle<ChunkedMemory<T>>::internal_reset(): not implemented.");
    }

    pointer pointer_;                    // data pointer into the current chunk
    shape_type strides_, upper_bound_;   // strides of / first point beyond the current chunk
    array_type * array_;
};
865
 // meta-programming helper classes to implement 'get<INDEX>(CoupledHandle)'
 // This type is intentionally left undefined: naming it in a typedef turns an
 // out-of-range TARGET_INDEX into a readable compiler error.
template <unsigned TARGET_INDEX>
struct Error__CoupledHandle_index_out_of_range;

namespace detail {

// Recursive case: walk down the handle chain (via Handle::base_type) until
// the chain member with index TARGET_INDEX is reached. isValid is false when
// TARGET_INDEX exceeds the chain length of the original handle.
template <unsigned TARGET_INDEX, class Handle, bool isValid, unsigned int INDEX=Handle::index>
struct CoupledHandleCastImpl
{
    typedef typename CoupledHandleCastImpl<TARGET_INDEX, typename Handle::base_type, isValid>::type type;
    typedef typename type::value_type value_type;
    typedef typename type::reference reference;
    typedef typename type::const_reference const_reference;
};

// Out-of-range case: every typedef names the undefined error type above.
template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX>
struct CoupledHandleCastImpl<TARGET_INDEX, Handle, false, INDEX>
{
    typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> type;
    typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> value_type;
    typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> reference;
    typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> const_reference;
};

// Terminating case: the current chain member has the requested index.
template <unsigned TARGET_INDEX, class Handle>
struct CoupledHandleCastImpl<TARGET_INDEX, Handle, true, TARGET_INDEX>
{
    typedef Handle type;
    typedef typename type::value_type value_type;
    typedef typename type::reference reference;
    typedef typename type::const_reference const_reference;
};

} // namespace detail
900
// Public facade over the detail machinery; validates TARGET_INDEX <= INDEX.
template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX>
struct CoupledHandleCast
: public detail::CoupledHandleCastImpl<TARGET_INDEX, Handle, (TARGET_INDEX <= INDEX), INDEX>
{};

// Cast a handle chain down to the sub-chain whose head has index
// TARGET_INDEX. This is a plain upcast to a base class — no runtime cost.
template <unsigned int TARGET_INDEX, class Handle>
inline
typename CoupledHandleCast<TARGET_INDEX, Handle>::type &
cast(Handle & handle)
{
    return handle;
}

template <unsigned int TARGET_INDEX, class Handle>
inline
typename CoupledHandleCast<TARGET_INDEX, Handle>::type const &
cast(Handle const & handle)
{
    return handle;
}

 /** Returns reference to the element in the band of the handle with index TARGET_INDEX.
 */
template <unsigned int TARGET_INDEX, class Handle>
inline
typename CoupledHandleCast<TARGET_INDEX, Handle>::reference
get(Handle & handle)
{
    return *cast<TARGET_INDEX>(handle);
}

 /** Returns a constant reference to the element in the band of the handle with index TARGET_INDEX.
 */
template <unsigned int TARGET_INDEX, class Handle>
inline
typename CoupledHandleCast<TARGET_INDEX, Handle>::const_reference
get(Handle const & handle)
{
    return *cast<TARGET_INDEX>(handle);
}
941
 // meta-programming helper classes to infer the type of
 // a CoupledHandle for a set of arrays
template <unsigned int N, class List>
struct ComposeCoupledHandle;

// Recursive case: prepend a handle for the first array type in the list to
// the chain composed from the remaining types.
template <unsigned int N, class T, class TAIL>
struct ComposeCoupledHandle<N, TypeList<T, TAIL> >
{
    typedef typename ComposeCoupledHandle<N, TAIL>::type BaseType;
    typedef typename MultiArrayShape<N>::type shape_type;
    typedef CoupledHandle<T, BaseType> type;

    // Handle for the subarray [start, end) of m, linked on top of 'base'.
    template <class S>
    type exec(MultiArrayView<N, T, S> const & m,
              shape_type const & start, shape_type const & end,
              BaseType const & base)
    {
        return type(m.subarray(start, end).data(), m.stride(), base);
    }

    template <class S>
    type exec(MultiArrayView<N, T, S> const & m, BaseType const & base)
    {
        return type(m.data(), m.stride(), base);
    }
};

// Recursion end: the chain terminates in the coordinate handle.
template <unsigned int N>
struct ComposeCoupledHandle<N, void>
{
    typedef typename MultiArrayShape<N>::type shape_type;
    typedef CoupledHandle<shape_type, void> type;

    type exec(shape_type const & shape)
    {
        return type(shape);
    }

    type exec(shape_type const & start, shape_type const & end)
    {
        return type(end-start);
    }
};
985
986
// Compute the CoupledHandle chain type for up to five array value types.
template <unsigned int N, class T1=void, class T2=void, class T3=void, class T4=void, class T5=void>
struct CoupledHandleType
{
    // reverse the order to get the desired index order
    typedef typename MakeTypeList<T5, T4, T3, T2, T1>::type TypeList;
    typedef typename ComposeCoupledHandle<N, TypeList>::type type;
};

// Multiband first argument: one dimension is consumed by the channel axis,
// so the handle chain is composed for N-1 spatial dimensions.
template <unsigned int N, class T1, class T2, class T3, class T4, class T5>
struct CoupledHandleType<N, Multiband<T1>, T2, T3, T4, T5>
{
    // reverse the order to get the desired index order
    typedef typename MakeTypeList<T5, T4, T3, T2, Multiband<T1> >::type TypeList;
    typedef typename ComposeCoupledHandle<N-1, TypeList>::type type;
};
1002
 // meta-programming helper classes to implement 'zip(iterator1, iterator2)'
template <class A, class B>
struct ZipCoupledHandles;

// Recursive case: rebuild the chain of the second handle on top of the
// zipped tail, reusing each member's data pointer and strides.
template <class A, class Head, class Tail>
struct ZipCoupledHandles<A, CoupledHandle<Head, Tail> >
{
    typedef typename ZipCoupledHandles<A, Tail>::type Next;
    typedef CoupledHandle<Head, Next> type;

    static type construct(A const & a, CoupledHandle<Head, Tail> const & h)
    {
        return type(h, ZipCoupledHandles<A, Tail>::construct(a, (Tail const &)h));
    }
};

// Recursion end: the second chain's coordinate handle is dropped and the
// first chain 'a' (which carries its own coordinate handle) is used instead.
template <class A, class Shape>
struct ZipCoupledHandles<A, CoupledHandle<Shape, void> >
{
    typedef A type;

    static type construct(A const & a, CoupledHandle<Shape, void> const &)
    {
        return a;
    }
};
1029
 // Allow an iterator that uses CoupledHandle to specialize its
 // dereferencing functions, such that
 //   '*iter' returns a reference to the current point if the handle
 //           is just a coordinate handle (INDEX == 0),
 //   '*iter' returns a reference to the current data element if the
 //           handle refers to just one array (INDEX == 1),
 //   '*iter' returns a reference to the handle itself if it refers to
 //           several arrays simultaneously (i.e. is actually a coupled handle).
template <class Handle, unsigned int INDEX=Handle::index>
struct CoupledHandleTraits
{
    typedef Handle value_type;
    typedef Handle & reference;
    typedef Handle const & const_reference;
    typedef Handle * pointer;
    typedef Handle const * const_pointer;

    static reference dereference(Handle & h)
    {
        return h;
    }

    static const_reference dereference(Handle const & h)
    {
        return h;
    }
};

// INDEX == 0: coordinate-only handle — dereference yields the current point.
template <class Handle>
struct CoupledHandleTraits<Handle, 0>
{
    typedef typename Handle::value_type value_type;
    typedef typename Handle::reference reference;
    typedef typename Handle::const_reference const_reference;
    typedef typename Handle::pointer pointer;
    typedef typename Handle::const_pointer const_pointer;

    static reference dereference(Handle & h)
    {
        return *h;
    }

    static const_reference dereference(Handle const & h)
    {
        return *h;
    }
};

// INDEX == 1: exactly one array — dereference yields the current element.
// (Intentionally identical in body to the INDEX == 0 case; a separate
// specialization is still needed because the general template would
// otherwise match.)
template <class Handle>
struct CoupledHandleTraits<Handle, 1>
{
    typedef typename Handle::value_type value_type;
    typedef typename Handle::reference reference;
    typedef typename Handle::const_reference const_reference;
    typedef typename Handle::pointer pointer;
    typedef typename Handle::const_pointer const_pointer;

    static reference dereference(Handle & h)
    {
        return *h;
    }

    static const_reference dereference(Handle const & h)
    {
        return *h;
    }
};
1097
1098
1099//@}
1100
1101} // namespace vigra
1102
1103#endif /* MULTI_HANDLE_HXX */
Definition multi_handle.hxx:68
TinyVector< MultiArrayIndex, N > type
Definition multi_shape.hxx:272
Class for a single RGB value.
Definition rgbvalue.hxx:128
Class for fixed size vectors.
Definition tinyvector.hxx:1008
LookupTag< TAG, A >::result_type get(A const &a)
Definition accumulator.hxx:2942
NormTraits< T >::SquaredNormType dot(const MultiArrayView< 2, T, C1 > &x, const MultiArrayView< 2, T, C2 > &y)
Definition matrix.hxx:1342
std::ptrdiff_t MultiArrayIndex
Definition multi_fwd.hxx:60

© Ullrich Köthe (ullrich.koethe@iwr.uni-heidelberg.de)
Heidelberg Collaboratory for Image Processing, University of Heidelberg, Germany

html generated using doxygen and Python
vigra 1.11.1