
SparseMatrix.h

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

/** \ingroup Sparse_Module
  *
  * \class SparseMatrix
  *
  * \brief The main sparse matrix class
  *
  * This class implements a sparse matrix using the very common compressed row/column storage
  * scheme.
  *
  * \param _Scalar the scalar type, i.e. the type of the coefficients
  * \param _Options Union of bit flags controlling the storage scheme. Currently the only possibility
  *                 is RowMajor. The default is 0 which means column-major.
  * \param _Index the type of the indices. Default is \c int.
  *
  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
  *
  */

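// A minimal usage sketch of the class documented above (sizes and values
// are illustrative only; assumes the Sparse module is included):
//
//   SparseMatrix<double> m(1000,1000);   // empty, column-major by default
//   m.reserve(3);                        // preallocate room for 3 nonzeros
//   m.insert(0,0) = 1.0;                 // insert() returns a reference to
//   m.insert(2,1) = 2.0;                 // the newly created coefficient
//   m.insert(5,1) = 3.0;
//   m.finalize();                        // compact the outer index array
//   double x = m.coeff(2,1);             // read access; x == 2.0
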
namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
} // end namespace internal

template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
{
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    using Base::operator=;
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
    // FIXME: why are these operators already available ???
    // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=)
    // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=)

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    using Base::IsRowMajor;
    typedef CompressedStorage<Scalar,Index> Storage;
    enum {
      Options = _Options
    };

  protected:

    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    Index* m_outerIndex;
    CompressedStorage<Scalar,Index> m_data;

  public:

    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    inline Index innerSize() const { return m_innerSize; }
    inline Index outerSize() const { return m_outerSize; }
    inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }

    inline const Scalar* _valuePtr() const { return &m_data.value(0); }
    inline Scalar* _valuePtr() { return &m_data.value(0); }

    inline const Index* _innerIndexPtr() const { return &m_data.index(0); }
    inline Index* _innerIndexPtr() { return &m_data.index(0); }

    inline const Index* _outerIndexPtr() const { return m_outerIndex; }
    inline Index* _outerIndexPtr() { return m_outerIndex; }

    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }

    inline Scalar coeff(Index row, Index col) const
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
    }

    inline Scalar& coeffRef(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      eigen_assert((p<end) && (m_data.index(p)==inner) && "coeffRef cannot be called on a zero coefficient");
      return m_data.value(p);
    }
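
    // A short illustration of the distinction above (m is a hypothetical,
    // finalized matrix): coeff() returns 0 for absent entries, while
    // coeffRef() asserts that the entry already exists.
    //
    //   double a = m.coeff(i,j);    // safe: yields 0 if (i,j) holds no nonzero
    //   m.coeffRef(2,1) += 1.0;     // valid only if (2,1) was inserted before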

  public:

    class InnerIterator;

    /** Removes all non zeros */
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }

    /** \returns the number of non zero coefficients */
    inline Index nonZeros() const  { return static_cast<Index>(m_data.size()); }

    /** Preallocates \a reserveSize non zeros */
    inline void reserve(Index reserveSize)
    {
      m_data.reserve(reserveSize);
    }

    //--- low level purely coherent filling ---

    /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
      * - the nonzero does not already exist
      * - the new coefficient is the last one according to the storage order
      *
      * Before filling a given inner vector you must call the startVec(Index) function.
      *
      * After an insertion session, you should call the finalize() function.
      *
      * \sa insert, insertBackByOuterInner, startVec */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \sa insertBack, startVec */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \warning use it only if you know what you are doing */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }
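
    // A minimal sketch of the coherent filling protocol formed by startVec(),
    // insertBackByOuterInner() and finalize() (nnzEstimate, i0 < i1 and the
    // values v0, v1 are hypothetical placeholders):
    //
    //   SparseMatrix<double> m(rows,cols);
    //   m.reserve(nnzEstimate);
    //   for (int j=0; j<m.outerSize(); ++j)     // outer vectors in order
    //   {
    //     m.startVec(j);
    //     m.insertBackByOuterInner(j,i0) = v0;  // inner indices must be
    //     m.insertBackByOuterInner(j,i1) = v1;  // strictly increasing within j
    //   }
    //   m.finalize();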

    //---

    /** \returns a reference to a newly inserted non zero coefficient with coordinates \a row x \a col.
      * The non zero coefficient must \b not already exist.
      *
      * \warning This function can be extremely slow if the non zero coefficients
      * are not inserted in a coherent order.
      *
      * After an insertion session, you should call the finalize() function.
      */
    EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index previousOuter = outer;
      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector
        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
        {
          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
          --previousOuter;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }

      // here we have to handle the tricky case where the outerIndex array
      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
      // the 2nd inner vector...
      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                    && (size_t(m_outerIndex[outer+1]) == m_data.size());

      size_t startId = m_outerIndex[outer];
      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
      size_t p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      float reallocRatio = 1;
      if (m_data.allocatedSize()<=m_data.size())
      {
        // if there is no preallocated memory, let's reserve a minimum of 32 elements
        if (m_data.size()==0)
        {
          m_data.reserve(32);
        }
        else
        {
          // we need to reallocate the data; to reduce multiple reallocations
          // we use a smart resize algorithm based on the current filling ratio
          // in addition, we use float to avoid integer overflows
          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
          // furthermore we bound the realloc ratio to:
          //   1) reduce multiple minor reallocs when the matrix is almost filled
          //   2) avoid allocating too much memory when the matrix is almost empty
          reallocRatio = std::min(std::max(reallocRatio,1.5f),8.f);
        }
      }
      m_data.resize(m_data.size()+1,reallocRatio);

      if (!isLastVec)
      {
        if (previousOuter==-1)
        {
          // oops wrong guess.
          // let's correct the outer offsets
          for (Index k=0; k<=(outer+1); ++k)
            m_outerIndex[k] = 0;
          Index k=outer+1;
          while(m_outerIndex[k]==0)
            m_outerIndex[k++] = 1;
          while (k<=m_outerSize && m_outerIndex[k]!=0)
            m_outerIndex[k++]++;
          p = 0;
          --k;
          k = m_outerIndex[k]-1;
          while (k>0)
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
        else
        {
          // we are not inserting into the last inner vec
          // update outer indices:
          Index j = outer+2;
          while (j<=m_outerSize && m_outerIndex[j]!=0)
            m_outerIndex[j++]++;
          --j;
          // shift data of last vecs:
          Index k = m_outerIndex[j]-1;
          while (k>=Index(p))
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
      }

      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }


    /** Must be called after inserting a set of non zero entries.
      */
    inline void finalize()
    {
      Index size = static_cast<Index>(m_data.size());
      Index i = m_outerSize;
      // find the last filled column
      while (i>=0 && m_outerIndex[i]==0)
        --i;
      ++i;
      while (i<=m_outerSize)
      {
        m_outerIndex[i] = size;
        ++i;
      }
    }

    /** Suppresses all nonzeros which are much smaller than \a reference under the tolerance \a epsilon */
    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    /** Suppress all nonzeros which do not satisfy the predicate \a keep.
      * The functor type \a KeepFunc must implement the following function:
      * \code
      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
      * \endcode
      * \sa prune(Scalar,RealScalar)
      */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      Index k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
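
    // A minimal sketch of a user-defined predicate for prune() (this functor
    // and the threshold are hypothetical, not part of this file):
    //
    //   struct keep_large {
    //     explicit keep_large(double t) : thresh(t) {}
    //     bool operator() (const int&, const int&, const double& value) const
    //     { return std::abs(value) > thresh; }
    //     double thresh;
    //   };
    //   m.prune(keep_large(0.5));   // drops all entries with |value| <= 0.5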

    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero
      * \sa resizeNonZeros(Index), reserve(), setZero()
      */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        delete[] m_outerIndex;
        m_outerIndex = new Index [outerSize+1];
        m_outerSize = outerSize;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }

    /** Low level API
      * Resize the nonzero vector to \a size */
    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0)
    {
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
    {
      resize(rows, cols);
    }

    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
    {
      *this = other.derived();
    }

    /** Copy constructor */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0)
    {
      *this = other.derived();
    }

    /** Swaps the content of two sparse matrices of the same type (optimization) */
    inline void swap(SparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      m_data.swap(other.m_data);
    }

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
//       std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n";
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      {
        resize(other.rows(), other.cols());
        memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
        m_data = other.m_data;
      }
      return *this;
    }

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    {
      return Base::operator=(product);
    }

    template<typename OtherDerived>
    EIGEN_STRONG_INLINE SparseMatrix& operator=(const ReturnByValue<OtherDerived>& func)
    {
      return Base::operator=(func);
    }
    #endif

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
      if (needToTranspose)
      {
        // two-pass algorithm:
        //  1 - compute the number of coeffs per dest inner vector
        //  2 - do the actual copy/eval
        // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
        typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
        typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
        OtherCopy otherCopy(other.derived());

        resize(other.rows(), other.cols());
        Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
        // pass 1
        // FIXME the above copy could be merged with that pass
        for (Index j=0; j<otherCopy.outerSize(); ++j)
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
            ++m_outerIndex[it.index()];

        // prefix sum
        Index count = 0;
        VectorXi positions(outerSize());
        for (Index j=0; j<outerSize(); ++j)
        {
          Index tmp = m_outerIndex[j];
          m_outerIndex[j] = count;
          positions[j] = count;
          count += tmp;
        }
        m_outerIndex[outerSize()] = count;
        // alloc
        m_data.resize(count);
        // pass 2
        for (Index j=0; j<otherCopy.outerSize(); ++j)
        {
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
          {
            Index pos = positions[it.index()]++;
            m_data.index(pos) = j;
            m_data.value(pos) = it.value();
          }
        }
        return *this;
      }
      else
      {
        // there is no special optimization
        return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
      }
    }
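
    // A short illustration of when the two-pass branch above runs: assigning
    // across storage orders triggers the counting/scatter conversion
    // (hypothetical sizes, a assumed already filled):
    //
    //   SparseMatrix<double,RowMajor> a(3,3);
    //   // ... fill a ...
    //   SparseMatrix<double> b(a);   // column-major target: pass 1 counts the
    //                                // entries per column, pass 2 scatters them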

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        for (Index i=0; i<m.nonZeros(); ++i)
        {
          s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        }
        s << std::endl;
        s << std::endl;
        s << "Column pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i)
        {
          s << m.m_outerIndex[i] << " ";
        }
        s << " $" << std::endl;
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    /** Destructor */
    inline ~SparseMatrix()
    {
      delete[] m_outerIndex;
    }

    /** Overloaded for performance */
    Scalar sum() const;

  public:

    /** \deprecated use setZero() and reserve()
      * Initializes the filling process of \c *this.
      * \param reserveSize approximate number of nonzeros
      * Note that the matrix \c *this is zeroed.
      */
    EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
    {
      setZero();
      m_data.reserve(reserveSize);
    }

    /** \deprecated use insert()
      * Like fill() but with random inner coordinates.
      */
    EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
    {
      return insert(row,col);
    }

    /** \deprecated use insert()
      */
    EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector
        Index i = outer;
        while (i>=0 && m_outerIndex[i]==0)
        {
          m_outerIndex[i] = m_data.size();
          --i;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }
      else
      {
        eigen_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
      }
//       std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n";
      assert(size_t(m_outerIndex[outer+1]) == m_data.size());
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \deprecated use finalize() */
    EIGEN_DEPRECATED void endFill() { finalize(); }

private:
  struct default_prunning_func {
    default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
    inline bool operator() (const Index&, const Index&, const Scalar& value) const
    {
      return !internal::isMuchSmallerThan(value, reference, epsilon);
    }
    Scalar reference;
    RealScalar epsilon;
  };
};

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
{
  public:
    InnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat._valuePtr()), m_indices(mat._innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer+1])
    {}

    inline InnerIterator& operator++() { m_id++; return *this; }

    inline const Scalar& value() const { return m_values[m_id]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }

    inline Index index() const { return m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id < m_end); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    const Index m_end;
};
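
// A minimal sketch of traversing all nonzeros with the iterator defined
// above (m is a hypothetical, finalized SparseMatrix<double>):
//
//   for (int j=0; j<m.outerSize(); ++j)
//     for (SparseMatrix<double>::InnerIterator it(m,j); it; ++it)
//       std::cout << "(" << it.row() << "," << it.col() << ") = "
//                 << it.value() << "\n";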

#endif // EIGEN_SPARSEMATRIX_H


