Main MRPT website > C++ reference
MRPT logo

SparseVector.h

Go to the documentation of this file.
00001 // This file is part of Eigen, a lightweight C++ template library
00002 // for linear algebra.
00003 //
00004 // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
00005 //
00006 // Eigen is free software; you can redistribute it and/or
00007 // modify it under the terms of the GNU Lesser General Public
00008 // License as published by the Free Software Foundation; either
00009 // version 3 of the License, or (at your option) any later version.
00010 //
00011 // Alternatively, you can redistribute it and/or
00012 // modify it under the terms of the GNU General Public License as
00013 // published by the Free Software Foundation; either version 2 of
00014 // the License, or (at your option) any later version.
00015 //
00016 // Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
00017 // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
00018 // FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
00019 // GNU General Public License for more details.
00020 //
00021 // You should have received a copy of the GNU Lesser General Public
00022 // License and a copy of the GNU General Public License along with
00023 // Eigen. If not, see <http://www.gnu.org/licenses/>.
00024 
00025 #ifndef EIGEN_SPARSEVECTOR_H
00026 #define EIGEN_SPARSEVECTOR_H
00027 
/** \class SparseVector
  *
  * \brief A sparse vector class: only the nonzero coefficients are stored,
  * as sorted (index, value) pairs
  *
  * \param _Scalar the scalar type, i.e. the type of the coefficients
  *
  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
  *
  */
00037 
namespace internal {
/** Traits specialization telling Eigen's expression machinery how a
  * SparseVector behaves: its scalar/index types, storage kind,
  * compile-time sizes and capability flags. */
template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseVector<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind;   // compressed sparse storage
  typedef MatrixXpr XprKind;    // participates as a matrix expression
  enum {
    // With the RowMajor option the vector is a 1 x N row vector,
    // otherwise an N x 1 column vector.
    IsColVector = _Options & RowMajorBit ? 0 : 1,

    RowsAtCompileTime = IsColVector ? Dynamic : 1,
    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    // NestByRefBit: nest by reference in expressions; LvalueBit: coefficients are writable.
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
}
00059 
00060 template<typename _Scalar, int _Options, typename _Index>
00061 class SparseVector
00062   : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
00063 {
00064   public:
00065     EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
00066     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
00067     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
00068 //     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =)
00069 
00070   protected:
00071   public:
00072 
00073     typedef SparseMatrixBase<SparseVector> SparseBase;
00074     enum { IsColVector = internal::traits<SparseVector>::IsColVector };
00075     
00076     enum {
00077       Options = _Options
00078     };
00079 
00080     CompressedStorage<Scalar,Index> m_data;
00081     Index m_size;
00082 
00083     CompressedStorage<Scalar,Index>& _data() { return m_data; }
00084     CompressedStorage<Scalar,Index>& _data() const { return m_data; }
00085 
00086   public:
00087 
00088     EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
00089     EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
00090     EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
00091     EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
00092     EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { eigen_assert(j==0); return m_size; }
00093 
00094     EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); }
00095     EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); }
00096 
00097     EIGEN_STRONG_INLINE const Index* _innerIndexPtr() const { return &m_data.index(0); }
00098     EIGEN_STRONG_INLINE Index* _innerIndexPtr() { return &m_data.index(0); }
00099 
00100     inline Scalar coeff(Index row, Index col) const
00101     {
00102       eigen_assert((IsColVector ? col : row)==0);
00103       return coeff(IsColVector ? row : col);
00104     }
00105     inline Scalar coeff(Index i) const { return m_data.at(i); }
00106 
00107     inline Scalar& coeffRef(Index row, Index col)
00108     {
00109       eigen_assert((IsColVector ? col : row)==0);
00110       return coeff(IsColVector ? row : col);
00111     }
00112 
00113     /** \returns a reference to the coefficient value at given index \a i
00114       * This operation involes a log(rho*size) binary search. If the coefficient does not
00115       * exist yet, then a sorted insertion into a sequential buffer is performed.
00116       *
00117       * This insertion might be very costly if the number of nonzeros above \a i is large.
00118       */
00119     inline Scalar& coeffRef(Index i)
00120     {
00121       return m_data.atWithInsertion(i);
00122     }
00123 
00124   public:
00125 
00126     class InnerIterator;
00127 
00128     inline void setZero() { m_data.clear(); }
00129 
00130     /** \returns the number of non zero coefficients */
00131     inline Index nonZeros() const  { return static_cast<Index>(m_data.size()); }
00132 
00133     inline void startVec(Index outer)
00134     {
00135       eigen_assert(outer==0);
00136     }
00137 
00138     inline Scalar& insertBackByOuterInner(Index outer, Index inner)
00139     {
00140       eigen_assert(outer==0);
00141       return insertBack(inner);
00142     }
00143     inline Scalar& insertBack(Index i)
00144     {
00145       m_data.append(0, i);
00146       return m_data.value(m_data.size()-1);
00147     }
00148 
00149     inline Scalar& insert(Index row, Index col)
00150     {
00151       Index inner = IsColVector ? row : col;
00152       Index outer = IsColVector ? col : row;
00153       eigen_assert(outer==0);
00154       return insert(inner);
00155     }
00156     Scalar& insert(Index i)
00157     {
00158       Index startId = 0;
00159       Index p = m_data.size() - 1;
00160       // TODO smart realloc
00161       m_data.resize(p+2,1);
00162 
00163       while ( (p >= startId) && (m_data.index(p) > i) )
00164       {
00165         m_data.index(p+1) = m_data.index(p);
00166         m_data.value(p+1) = m_data.value(p);
00167         --p;
00168       }
00169       m_data.index(p+1) = i;
00170       m_data.value(p+1) = 0;
00171       return m_data.value(p+1);
00172     }
00173 
00174     /**
00175       */
00176     inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
00177 
00178 
00179     inline void finalize() {}
00180 
00181     void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
00182     {
00183       m_data.prune(reference,epsilon);
00184     }
00185 
00186     void resize(Index rows, Index cols)
00187     {
00188       eigen_assert(rows==1 || cols==1);
00189       resize(IsColVector ? rows : cols);
00190     }
00191 
00192     void resize(Index newSize)
00193     {
00194       m_size = newSize;
00195       m_data.clear();
00196     }
00197 
00198     void resizeNonZeros(Index size) { m_data.resize(size); }
00199 
00200     inline SparseVector() : m_size(0) { resize(0); }
00201 
00202     inline SparseVector(Index size) : m_size(0) { resize(size); }
00203 
00204     inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
00205 
00206     template<typename OtherDerived>
00207     inline SparseVector(const MatrixBase<OtherDerived>& other)
00208       : m_size(0)
00209     {
00210       *this = other.derived();
00211     }
00212 
00213     template<typename OtherDerived>
00214     inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
00215       : m_size(0)
00216     {
00217       *this = other.derived();
00218     }
00219 
00220     inline SparseVector(const SparseVector& other)
00221       : m_size(0)
00222     {
00223       *this = other.derived();
00224     }
00225 
00226     inline void swap(SparseVector& other)
00227     {
00228       std::swap(m_size, other.m_size);
00229       m_data.swap(other.m_data);
00230     }
00231 
00232     inline SparseVector& operator=(const SparseVector& other)
00233     {
00234       if (other.isRValue())
00235       {
00236         swap(other.const_cast_derived());
00237       }
00238       else
00239       {
00240         resize(other.size());
00241         m_data = other.m_data;
00242       }
00243       return *this;
00244     }
00245 
00246     template<typename OtherDerived>
00247     inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
00248     {
00249       if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
00250         return Base::operator=(other.transpose());
00251       else
00252         return Base::operator=(other);
00253     }
00254 
00255     #ifndef EIGEN_PARSED_BY_DOXYGEN
00256     template<typename Lhs, typename Rhs>
00257     inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
00258     {
00259       return Base::operator=(product);
00260     }
00261     #endif
00262 
00263 //       const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
00264 //       if (needToTranspose)
00265 //       {
00266 //         // two passes algorithm:
00267 //         //  1 - compute the number of coeffs per dest inner vector
00268 //         //  2 - do the actual copy/eval
00269 //         // Since each coeff of the rhs has to be evaluated twice, let's evauluate it if needed
00270 //         typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
00271 //         OtherCopy otherCopy(other.derived());
00272 //         typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
00273 //
00274 //         resize(other.rows(), other.cols());
00275 //         Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
00276 //         // pass 1
00277 //         // FIXME the above copy could be merged with that pass
00278 //         for (int j=0; j<otherCopy.outerSize(); ++j)
00279 //           for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
00280 //             ++m_outerIndex[it.index()];
00281 //
00282 //         // prefix sum
00283 //         int count = 0;
00284 //         VectorXi positions(outerSize());
00285 //         for (int j=0; j<outerSize(); ++j)
00286 //         {
00287 //           int tmp = m_outerIndex[j];
00288 //           m_outerIndex[j] = count;
00289 //           positions[j] = count;
00290 //           count += tmp;
00291 //         }
00292 //         m_outerIndex[outerSize()] = count;
00293 //         // alloc
00294 //         m_data.resize(count);
00295 //         // pass 2
00296 //         for (int j=0; j<otherCopy.outerSize(); ++j)
00297 //           for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
00298 //           {
00299 //             int pos = positions[it.index()]++;
00300 //             m_data.index(pos) = j;
00301 //             m_data.value(pos) = it.value();
00302 //           }
00303 //
00304 //         return *this;
00305 //       }
00306 //       else
00307 //       {
00308 //         // there is no special optimization
00309 //         return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
00310 //       }
00311 //     }
00312 
00313     friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
00314     {
00315       for (Index i=0; i<m.nonZeros(); ++i)
00316         s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
00317       s << std::endl;
00318       return s;
00319     }
00320 
00321     // this specialized version does not seems to be faster
00322 //     Scalar dot(const SparseVector& other) const
00323 //     {
00324 //       int i=0, j=0;
00325 //       Scalar res = 0;
00326 //       asm("#begindot");
00327 //       while (i<nonZeros() && j<other.nonZeros())
00328 //       {
00329 //         if (m_data.index(i)==other.m_data.index(j))
00330 //         {
00331 //           res += m_data.value(i) * internal::conj(other.m_data.value(j));
00332 //           ++i; ++j;
00333 //         }
00334 //         else if (m_data.index(i)<other.m_data.index(j))
00335 //           ++i;
00336 //         else
00337 //           ++j;
00338 //       }
00339 //       asm("#enddot");
00340 //       return res;
00341 //     }
00342 
00343     /** Destructor */
00344     inline ~SparseVector() {}
00345 
00346     /** Overloaded for performance */
00347     Scalar sum() const;
00348 
00349   public:
00350 
00351     /** \deprecated use setZero() and reserve() */
00352     EIGEN_DEPRECATED void startFill(Index reserve)
00353     {
00354       setZero();
00355       m_data.reserve(reserve);
00356     }
00357 
00358     /** \deprecated use insertBack(Index,Index) */
00359     EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
00360     {
00361       eigen_assert(r==0 || c==0);
00362       return fill(IsColVector ? r : c);
00363     }
00364 
00365     /** \deprecated use insertBack(Index) */
00366     EIGEN_DEPRECATED Scalar& fill(Index i)
00367     {
00368       m_data.append(0, i);
00369       return m_data.value(m_data.size()-1);
00370     }
00371 
00372     /** \deprecated use insert(Index,Index) */
00373     EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
00374     {
00375       eigen_assert(r==0 || c==0);
00376       return fillrand(IsColVector ? r : c);
00377     }
00378 
00379     /** \deprecated use insert(Index) */
00380     EIGEN_DEPRECATED Scalar& fillrand(Index i)
00381     {
00382       return insert(i);
00383     }
00384 
00385     /** \deprecated use finalize() */
00386     EIGEN_DEPRECATED void endFill() {}
00387 };
00388 
00389 template<typename Scalar, int _Options, typename _Index>
00390 class SparseVector<Scalar,_Options,_Index>::InnerIterator
00391 {
00392   public:
00393     InnerIterator(const SparseVector& vec, Index outer=0)
00394       : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
00395     {
00396       eigen_assert(outer==0);
00397     }
00398 
00399     InnerIterator(const CompressedStorage<Scalar,Index>& data)
00400       : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
00401     {}
00402 
00403     template<unsigned int Added, unsigned int Removed>
00404     InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, Index )
00405       : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size())
00406     {}
00407 
00408     inline InnerIterator& operator++() { m_id++; return *this; }
00409 
00410     inline Scalar value() const { return m_data.value(m_id); }
00411     inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
00412 
00413     inline Index index() const { return m_data.index(m_id); }
00414     inline Index row() const { return IsColVector ? index() : 0; }
00415     inline Index col() const { return IsColVector ? 0 : index(); }
00416 
00417     inline operator bool() const { return (m_id < m_end); }
00418 
00419   protected:
00420     const CompressedStorage<Scalar,Index>& m_data;
00421     Index m_id;
00422     const Index m_end;
00423 };
00424 
00425 #endif // EIGEN_SPARSEVECTOR_H



Page generated by Doxygen 1.7.3 for MRPT 0.9.4 SVN:exported at Tue Jan 25 21:56:31 UTC 2011