SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // Eigen is free software; you can redistribute it and/or
7 // modify it under the terms of the GNU Lesser General Public
8 // License as published by the Free Software Foundation; either
9 // version 3 of the License, or (at your option) any later version.
10 //
11 // Alternatively, you can redistribute it and/or
12 // modify it under the terms of the GNU General Public License as
13 // published by the Free Software Foundation; either version 2 of
14 // the License, or (at your option) any later version.
15 //
16 // Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
17 // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 // FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
19 // GNU General Public License for more details.
20 //
21 // You should have received a copy of the GNU Lesser General Public
22 // License and a copy of the GNU General Public License along with
23 // Eigen. If not, see <http://www.gnu.org/licenses/>.
24 
25 #ifndef EIGEN_SPARSEMATRIX_H
26 #define EIGEN_SPARSEMATRIX_H
27 
28 namespace Eigen {
29 
56 namespace internal {
57 template<typename _Scalar, int _Options, typename _Index>
58 struct traits<SparseMatrix<_Scalar, _Options, _Index> >
59 {
60  typedef _Scalar Scalar;
61  typedef _Index Index;
62  typedef Sparse StorageKind;
63  typedef MatrixXpr XprKind;
64  enum {
65  RowsAtCompileTime = Dynamic,
66  ColsAtCompileTime = Dynamic,
67  MaxRowsAtCompileTime = Dynamic,
68  MaxColsAtCompileTime = Dynamic,
69  Flags = _Options | NestByRefBit | LvalueBit,
70  CoeffReadCost = NumTraits<Scalar>::ReadCost,
71  SupportedAccessPatterns = InnerRandomAccessPattern
72  };
73 };
74 
75 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
76 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
77 {
78  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
79  typedef typename nested<MatrixType>::type MatrixTypeNested;
80  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
81 
82  typedef _Scalar Scalar;
83  typedef Dense StorageKind;
84  typedef _Index Index;
85  typedef MatrixXpr XprKind;
86 
87  enum {
88  RowsAtCompileTime = Dynamic,
89  ColsAtCompileTime = 1,
90  MaxRowsAtCompileTime = Dynamic,
91  MaxColsAtCompileTime = 1,
92  Flags = 0,
93  CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
94  };
95 };
96 
97 } // end namespace internal
98 
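For context, the traits above expose the scalar type, index type and storage order selected by the three template parameters that user code passes when declaring a matrix. A brief illustrative sketch (not part of this header):

  #include <Eigen/Sparse>

  Eigen::SparseMatrix<double> A(1000, 2000);                    // column-major (the default), int indices
  Eigen::SparseMatrix<double, Eigen::RowMajor> B(1000, 2000);   // row-major storage selected via the _Options flag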
99 template<typename _Scalar, int _Options, typename _Index>
100 class SparseMatrix
101  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
102 {
103  public:
104  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
107 
109  using Base::IsRowMajor;
110  typedef internal::CompressedStorage<Scalar,Index> Storage;
111  enum {
112  Options = _Options
113  };
114 
115  protected:
116 
118 
119  Index m_outerSize;
120  Index m_innerSize;
121  Index* m_outerIndex;
122  Index* m_innerNonZeros; // optional, if null then the data is compressed
123  Storage m_data;
124 
127 
128  public:
129 
131  inline bool isCompressed() const { return m_innerNonZeros==0; }
132 
134  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
136  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
137 
139  inline Index innerSize() const { return m_innerSize; }
141  inline Index outerSize() const { return m_outerSize; }
142 
146  inline const Scalar* valuePtr() const { return &m_data.value(0); }
150  inline Scalar* valuePtr() { return &m_data.value(0); }
151 
155  inline const Index* innerIndexPtr() const { return &m_data.index(0); }
159  inline Index* innerIndexPtr() { return &m_data.index(0); }
160 
164  inline const Index* outerIndexPtr() const { return m_outerIndex; }
168  inline Index* outerIndexPtr() { return m_outerIndex; }
169 
173  inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
177  inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
178 
180  inline Storage& data() { return m_data; }
182  inline const Storage& data() const { return m_data; }
183 
186  inline Scalar coeff(Index row, Index col) const
187  {
188  const Index outer = IsRowMajor ? row : col;
189  const Index inner = IsRowMajor ? col : row;
190  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
191  return m_data.atInRange(m_outerIndex[outer], end, inner);
192  }
193 
202  inline Scalar& coeffRef(Index row, Index col)
203  {
204  const Index outer = IsRowMajor ? row : col;
205  const Index inner = IsRowMajor ? col : row;
206 
207  Index start = m_outerIndex[outer];
208  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
209  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
210  if(end<=start)
211  return insert(row,col);
212  const Index p = m_data.searchLowerIndex(start,end-1,inner);
213  if((p<end) && (m_data.index(p)==inner))
214  return m_data.value(p);
215  else
216  return insert(row,col);
217  }
218 
231  Scalar& insert(Index row, Index col)
232  {
233  if(isCompressed())
234  {
235  reserve(VectorXi::Constant(outerSize(), 2));
236  }
237  return insertUncompressed(row,col);
238  }
239 
240  public:
241 
242  class InnerIterator;
243  class ReverseInnerIterator;
244 
246  inline void setZero()
247  {
248  m_data.clear();
249  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
250  if(m_innerNonZeros)
251  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
252  }
253 
255  inline Index nonZeros() const
256  {
257  if(m_innerNonZeros)
258  return innerNonZeros().sum();
259  return static_cast<Index>(m_data.size());
260  }
261 
265  inline void reserve(Index reserveSize)
266  {
267  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
268  m_data.reserve(reserveSize);
269  }
270 
271  #ifdef EIGEN_PARSED_BY_DOXYGEN
272 
275  template<class SizesType>
276  inline void reserve(const SizesType& reserveSizes);
277  #else
278  template<class SizesType>
279  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
280  {
281  EIGEN_UNUSED_VARIABLE(enableif);
282  reserveInnerVectors(reserveSizes);
283  }
284  template<class SizesType>
285  inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif = typename SizesType::Scalar())
286  {
287  EIGEN_UNUSED_VARIABLE(enableif);
288  reserveInnerVectors(reserveSizes);
289  }
290  #endif // EIGEN_PARSED_BY_DOXYGEN
291  protected:
292  template<class SizesType>
293  inline void reserveInnerVectors(const SizesType& reserveSizes)
294  {
295 
296  if(isCompressed())
297  {
298  std::size_t totalReserveSize = 0;
299  // turn the matrix into non-compressed mode
300  m_innerNonZeros = new Index[m_outerSize];
301 
302  // temporarily use m_innerNonZeros to hold the new starting points.
303  Index* newOuterIndex = m_innerNonZeros;
304 
305  Index count = 0;
306  for(Index j=0; j<m_outerSize; ++j)
307  {
308  newOuterIndex[j] = count;
309  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
310  totalReserveSize += reserveSizes[j];
311  }
312  m_data.reserve(totalReserveSize);
313  std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
314  for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
315  {
316  ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
317  for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
318  {
319  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
320  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
321  }
322  previousOuterIndex = m_outerIndex[j];
323  m_outerIndex[j] = newOuterIndex[j];
324  m_innerNonZeros[j] = innerNNZ;
325  }
326  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
327 
328  m_data.resize(m_outerIndex[m_outerSize]);
329  }
330  else
331  {
332  Index* newOuterIndex = new Index[m_outerSize+1];
333  Index count = 0;
334  for(Index j=0; j<m_outerSize; ++j)
335  {
336  newOuterIndex[j] = count;
337  Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
338  Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
339  count += toReserve + m_innerNonZeros[j];
340  }
341  newOuterIndex[m_outerSize] = count;
342 
343  m_data.resize(count);
344  for(ptrdiff_t j=m_outerSize-1; j>=0; --j)
345  {
346  std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
347  if(offset>0)
348  {
349  std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
350  for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
351  {
352  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
353  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
354  }
355  }
356  }
357 
358  std::swap(m_outerIndex, newOuterIndex);
359  delete[] newOuterIndex;
360  }
361 
362  }
363  public:
364 
365  //--- low level purely coherent filling ---
366 
377  inline Scalar& insertBack(Index row, Index col)
378  {
379  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
380  }
381 
384  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
385  {
386  eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
387  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
388  Index p = m_outerIndex[outer+1];
389  ++m_outerIndex[outer+1];
390  m_data.append(0, inner);
391  return m_data.value(p);
392  }
393 
396  inline Scalar& insertBackByOuterInnerUnsafe(Index outer, Index inner)
397  {
398  Index p = m_outerIndex[outer+1];
399  ++m_outerIndex[outer+1];
400  m_data.append(0, inner);
401  return m_data.value(p);
402  }
403 
406  inline void startVec(Index outer)
407  {
408  eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
409  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
410  m_outerIndex[outer+1] = m_outerIndex[outer];
411  }
412 
416  inline void finalize()
417  {
418  if(isCompressed())
419  {
420  Index size = static_cast<Index>(m_data.size());
421  Index i = m_outerSize;
422  // find the last filled column
423  while (i>=0 && m_outerIndex[i]==0)
424  --i;
425  ++i;
426  while (i<=m_outerSize)
427  {
428  m_outerIndex[i] = size;
429  ++i;
430  }
431  }
432  }
433 
434  //---
435 
436  template<typename InputIterators>
437  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
438 
439  void sumupDuplicates();
440 
441  //---
442 
445  Scalar& insertByOuterInner(Index j, Index i)
446  {
447  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
448  }
449 
452  void makeCompressed()
453  {
454  if(isCompressed())
455  return;
456 
457  Index oldStart = m_outerIndex[1];
458  m_outerIndex[1] = m_innerNonZeros[0];
459  for(Index j=1; j<m_outerSize; ++j)
460  {
461  Index nextOldStart = m_outerIndex[j+1];
462  std::ptrdiff_t offset = oldStart - m_outerIndex[j];
463  if(offset>0)
464  {
465  for(Index k=0; k<m_innerNonZeros[j]; ++k)
466  {
467  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
468  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
469  }
470  }
471  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
472  oldStart = nextOldStart;
473  }
474  delete[] m_innerNonZeros;
475  m_innerNonZeros = 0;
476  m_data.resize(m_outerIndex[m_outerSize]);
477  m_data.squeeze();
478  }
479 
481  void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
482  {
483  prune(default_prunning_func(reference,epsilon));
484  }
485 
493  template<typename KeepFunc>
494  void prune(const KeepFunc& keep = KeepFunc())
495  {
496  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
497  // TODO also implement a unit test
498  makeCompressed();
499 
500  Index k = 0;
501  for(Index j=0; j<m_outerSize; ++j)
502  {
503  Index previousStart = m_outerIndex[j];
504  m_outerIndex[j] = k;
505  Index end = m_outerIndex[j+1];
506  for(Index i=previousStart; i<end; ++i)
507  {
508  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
509  {
510  m_data.value(k) = m_data.value(i);
511  m_data.index(k) = m_data.index(i);
512  ++k;
513  }
514  }
515  }
516  m_outerIndex[m_outerSize] = k;
517  m_data.resize(k,0);
518  }
519 
523  void resize(Index rows, Index cols)
524  {
525  const Index outerSize = IsRowMajor ? rows : cols;
526  m_innerSize = IsRowMajor ? cols : rows;
527  m_data.clear();
528  if (m_outerSize != outerSize || m_outerSize==0)
529  {
530  delete[] m_outerIndex;
531  m_outerIndex = new Index [outerSize+1];
532  m_outerSize = outerSize;
533  }
534  if(m_innerNonZeros)
535  {
536  delete[] m_innerNonZeros;
537  m_innerNonZeros = 0;
538  }
539  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
540  }
541 
544  inline void resizeNonZeros(Index size)
545  {
546  // TODO remove this function
547  m_data.resize(size);
548  }
549 
551  const Diagonal<const SparseMatrix> diagonal() const { return *this; }
552 
554  inline SparseMatrix()
556  {
557  check_template_parameters();
558  resize(0, 0);
559  }
560 
563  inline SparseMatrix(Index rows, Index cols)
564  {
565  check_template_parameters();
566  resize(rows, cols);
567  }
568 
570  template<typename OtherDerived>
572  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
573  {
574  check_template_parameters();
575  *this = other.derived();
576  }
577 
579  inline SparseMatrix(const SparseMatrix& other)
581  {
582  check_template_parameters();
583  *this = other.derived();
584  }
585 
588  inline void swap(SparseMatrix& other)
589  {
590  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
591  std::swap(m_outerIndex, other.m_outerIndex);
592  std::swap(m_innerSize, other.m_innerSize);
593  std::swap(m_outerSize, other.m_outerSize);
594  std::swap(m_innerNonZeros, other.m_innerNonZeros);
595  m_data.swap(other.m_data);
596  }
597 
598  inline SparseMatrix& operator=(const SparseMatrix& other)
599  {
600  if (other.isRValue())
601  {
602  swap(other.const_cast_derived());
603  }
604  else
605  {
606  initAssignment(other);
607  if(other.isCompressed())
608  {
609  memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
610  m_data = other.m_data;
611  }
612  else
613  {
614  Base::operator=(other);
615  }
616  }
617  return *this;
618  }
619 
620  #ifndef EIGEN_PARSED_BY_DOXYGEN
621  template<typename Lhs, typename Rhs>
622  inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
623  { return Base::operator=(product); }
624 
625  template<typename OtherDerived>
626  inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
627  { return Base::operator=(other.derived()); }
628 
629  template<typename OtherDerived>
630  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
631  { return Base::operator=(other.derived()); }
632  #endif
633 
634  template<typename OtherDerived>
635  SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
636  {
637  initAssignment(other.derived());
638  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
639  if (needToTranspose)
640  {
641  // two passes algorithm:
642  // 1 - compute the number of coeffs per dest inner vector
643  // 2 - do the actual copy/eval
644  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
645  typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
646  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
647  OtherCopy otherCopy(other.derived());
648 
650  // pass 1
651  // FIXME the above copy could be merged with that pass
652  for (Index j=0; j<otherCopy.outerSize(); ++j)
653  for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
654  ++m_outerIndex[it.index()];
655 
656  // prefix sum
657  Index count = 0;
658  VectorXi positions(outerSize());
659  for (Index j=0; j<outerSize(); ++j)
660  {
661  Index tmp = m_outerIndex[j];
662  m_outerIndex[j] = count;
663  positions[j] = count;
664  count += tmp;
665  }
666  m_outerIndex[outerSize()] = count;
667  // alloc
668  m_data.resize(count);
669  // pass 2
670  for (Index j=0; j<otherCopy.outerSize(); ++j)
671  {
672  for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
673  {
674  Index pos = positions[it.index()]++;
675  m_data.index(pos) = j;
676  m_data.value(pos) = it.value();
677  }
678  }
679  return *this;
680  }
681  else
682  {
683  // there is no special optimization
684  return Base::operator=(other.derived());
685  }
686  }
687 
688  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
689  {
690  EIGEN_DBG_SPARSE(
691  s << "Nonzero entries:\n";
692  if(m.isCompressed())
693  for (Index i=0; i<m.nonZeros(); ++i)
694  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
695  else
696  for (Index i=0; i<m.outerSize(); ++i)
697  {
698  int p = m.m_outerIndex[i];
699  int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
700  Index k=p;
701  for (; k<pe; ++k)
702  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
703  for (; k<m.m_outerIndex[i+1]; ++k)
704  s << "(_,_) ";
705  }
706  s << std::endl;
707  s << std::endl;
708  s << "Outer pointers:\n";
709  for (Index i=0; i<m.outerSize(); ++i)
710  s << m.m_outerIndex[i] << " ";
711  s << " $" << std::endl;
712  if(!m.isCompressed())
713  {
714  s << "Inner non zeros:\n";
715  for (Index i=0; i<m.outerSize(); ++i)
716  s << m.m_innerNonZeros[i] << " ";
717  s << " $" << std::endl;
718  }
719  s << std::endl;
720  );
721  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
722  return s;
723  }
724 
726  inline ~SparseMatrix()
727  {
728  delete[] m_outerIndex;
729  delete[] m_innerNonZeros;
730  }
731 
732 #ifndef EIGEN_PARSED_BY_DOXYGEN
733 
734  Scalar sum() const;
735 #endif
736 
737 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
738 # include EIGEN_SPARSEMATRIX_PLUGIN
739 # endif
740 
741 protected:
742 
743  template<typename Other>
744  void initAssignment(const Other& other)
745  {
746  resize(other.rows(), other.cols());
747  if(m_innerNonZeros)
748  {
749  delete[] m_innerNonZeros;
750  m_innerNonZeros = 0;
751  }
752  }
753 
756  Scalar& insertCompressed(Index row, Index col)
757  {
759 
760  const Index outer = IsRowMajor ? row : col;
761  const Index inner = IsRowMajor ? col : row;
762 
763  Index previousOuter = outer;
764  if (m_outerIndex[outer+1]==0)
765  {
766  // we start a new inner vector
767  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
768  {
769  m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
770  --previousOuter;
771  }
772  m_outerIndex[outer+1] = m_outerIndex[outer];
773  }
774 
775  // here we have to handle the tricky case where the outerIndex array
776  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
777  // the 2nd inner vector...
778  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
779  && (size_t(m_outerIndex[outer+1]) == m_data.size());
780 
781  size_t startId = m_outerIndex[outer];
782  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
783  size_t p = m_outerIndex[outer+1];
784  ++m_outerIndex[outer+1];
785 
786  float reallocRatio = 1;
787  if (m_data.allocatedSize()<=m_data.size())
788  {
789  // if there is no preallocated memory, let's reserve a minimum of 32 elements
790  if (m_data.size()==0)
791  {
792  m_data.reserve(32);
793  }
794  else
795  {
796  // we need to reallocate the data, to reduce multiple reallocations
797  // we use a smart resize algorithm based on the current filling ratio
798  // in addition, we use float to avoid integers overflows
799  float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
800  reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
801  // furthermore we bound the realloc ratio to:
802  // 1) reduce multiple minor realloc when the matrix is almost filled
803  // 2) avoid allocating too much memory when the matrix is almost empty
804  reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
805  }
806  }
807  m_data.resize(m_data.size()+1,reallocRatio);
808 
809  if (!isLastVec)
810  {
811  if (previousOuter==-1)
812  {
813  // oops wrong guess.
814  // let's correct the outer offsets
815  for (Index k=0; k<=(outer+1); ++k)
816  m_outerIndex[k] = 0;
817  Index k=outer+1;
818  while(m_outerIndex[k]==0)
819  m_outerIndex[k++] = 1;
820  while (k<=m_outerSize && m_outerIndex[k]!=0)
821  m_outerIndex[k++]++;
822  p = 0;
823  --k;
824  k = m_outerIndex[k]-1;
825  while (k>0)
826  {
827  m_data.index(k) = m_data.index(k-1);
828  m_data.value(k) = m_data.value(k-1);
829  k--;
830  }
831  }
832  else
833  {
834  // we are not inserting into the last inner vec
835  // update outer indices:
836  Index j = outer+2;
837  while (j<=m_outerSize && m_outerIndex[j]!=0)
838  m_outerIndex[j++]++;
839  --j;
840  // shift data of last vecs:
841  Index k = m_outerIndex[j]-1;
842  while (k>=Index(p))
843  {
844  m_data.index(k) = m_data.index(k-1);
845  m_data.value(k) = m_data.value(k-1);
846  k--;
847  }
848  }
849  }
850 
851  while ( (p > startId) && (m_data.index(p-1) > inner) )
852  {
853  m_data.index(p) = m_data.index(p-1);
854  m_data.value(p) = m_data.value(p-1);
855  --p;
856  }
857 
858  m_data.index(p) = inner;
859  return (m_data.value(p) = 0);
860  }
861 
864  class SingletonVector
865  {
866  Index m_index;
867  Index m_value;
868  public:
869  typedef Index value_type;
870  SingletonVector(Index i, Index v)
871  : m_index(i), m_value(v)
872  {}
873 
874  Index operator[](Index i) const { return i==m_index ? m_value : 0; }
875  };
876 
879  Scalar& insertUncompressed(Index row, Index col)
880  {
882 
883  const Index outer = IsRowMajor ? row : col;
884  const Index inner = IsRowMajor ? col : row;
885 
886  std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
887  std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
888  if(innerNNZ>=room)
889  {
890  // this inner vector is full, we need to reallocate the whole buffer :(
891  reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
892  }
893 
894  Index startId = m_outerIndex[outer];
895  Index p = startId + m_innerNonZeros[outer];
896  while ( (p > startId) && (m_data.index(p-1) > inner) )
897  {
898  m_data.index(p) = m_data.index(p-1);
899  m_data.value(p) = m_data.value(p-1);
900  --p;
901  }
902 
903  m_innerNonZeros[outer]++;
904 
905  m_data.index(p) = inner;
906  return (m_data.value(p) = 0);
907  }
908 
909 public:
912  inline Scalar& insertBackUncompressed(Index row, Index col)
913  {
914  const Index outer = IsRowMajor ? row : col;
915  const Index inner = IsRowMajor ? col : row;
916 
918  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
919 
920  Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
921  m_innerNonZeros[outer]++;
922  m_data.index(p) = inner;
923  return (m_data.value(p) = 0);
924  }
925 
926 private:
927  static void check_template_parameters()
928  {
929  EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
930  }
931 
932  struct default_prunning_func {
933  default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
934  inline bool operator() (const Index&, const Index&, const Scalar& value) const
935  {
936  return !internal::isMuchSmallerThan(value, reference, epsilon);
937  }
938  Scalar reference;
939  RealScalar epsilon;
940  };
941 };
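As a usage sketch of the insertion API defined above (illustrative code, not part of this header): reserve an estimate of the non-zeros per inner vector, insert the coefficients, then switch back to compressed storage:

  #include <Eigen/Sparse>

  Eigen::SparseMatrix<double> A(5, 5);                 // empty 5x5 column-major matrix
  A.reserve(Eigen::VectorXi::Constant(5, 3));          // expect about 3 non-zeros per column
  A.insert(0, 0) = 1.0;                                // creates the entry (uncompressed mode)
  A.insert(2, 1) = 2.0;
  A.coeffRef(2, 1) += 0.5;                             // the entry already exists, no reallocation
  A.makeCompressed();                                  // back to the compact CSC representation
  // A.nonZeros() == 2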
942 
943 template<typename Scalar, int _Options, typename _Index>
944 class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
945 {
946  public:
947  InnerIterator(const SparseMatrix& mat, Index outer)
948  : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
949  {
950  if(mat.isCompressed())
951  m_end = mat.m_outerIndex[outer+1];
952  else
953  m_end = m_id + mat.m_innerNonZeros[outer];
954  }
955 
956  inline InnerIterator& operator++() { m_id++; return *this; }
957 
958  inline const Scalar& value() const { return m_values[m_id]; }
959  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
960 
961  inline Index index() const { return m_indices[m_id]; }
962  inline Index outer() const { return m_outer; }
963  inline Index row() const { return IsRowMajor ? m_outer : index(); }
964  inline Index col() const { return IsRowMajor ? index() : m_outer; }
965 
966  inline operator bool() const { return (m_id < m_end); }
967 
968  protected:
969  const Scalar* m_values;
970  const Index* m_indices;
971  const Index m_outer;
972  Index m_id;
973  Index m_end;
974 };
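A minimal sketch of the canonical traversal with this InnerIterator (it assumes the matrix A from the previous sketch and <iostream>):

  for (int j = 0; j < A.outerSize(); ++j)                                  // one inner vector per column (or row if row-major)
    for (Eigen::SparseMatrix<double>::InnerIterator it(A, j); it; ++it)
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << '\n';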
975 
976 template<typename Scalar, int _Options, typename _Index>
977 class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
978 {
979  public:
980  ReverseInnerIterator(const SparseMatrix& mat, Index outer)
981  : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
982  {
983  if(mat.isCompressed())
984  m_id = mat.m_outerIndex[outer+1];
985  else
986  m_id = m_start + mat.m_innerNonZeros[outer];
987  }
988 
989  inline ReverseInnerIterator& operator--() { --m_id; return *this; }
990 
991  inline const Scalar& value() const { return m_values[m_id-1]; }
992  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
993 
994  inline Index index() const { return m_indices[m_id-1]; }
995  inline Index outer() const { return m_outer; }
996  inline Index row() const { return IsRowMajor ? m_outer : index(); }
997  inline Index col() const { return IsRowMajor ? index() : m_outer; }
998 
999  inline operator bool() const { return (m_id > m_start); }
1000 
1001  protected:
1002  const Scalar* m_values;
1003  const Index* m_indices;
1004  const Index m_outer;
1005  Index m_id;
1006  const Index m_start;
1007 };
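The ReverseInnerIterator walks the same inner vector from its last stored entry down to the first; a short sketch under the same assumptions:

  for (int j = 0; j < A.outerSize(); ++j)
    for (Eigen::SparseMatrix<double>::ReverseInnerIterator it(A, j); it; --it)
      std::cout << it.index() << " -> " << it.value() << '\n';              // inner indices in decreasing order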
1008 
1009 namespace internal {
1010 
1011 template<typename InputIterator, typename SparseMatrixType>
1012 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
1013 {
1014  EIGEN_UNUSED_VARIABLE(Options);
1015  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
1016  typedef typename SparseMatrixType::Scalar Scalar;
1017  typedef typename SparseMatrixType::Index Index;
1018  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
1019 
1020  // pass 1: count the nnz per inner-vector
1021  VectorXi wi(trMat.outerSize());
1022  wi.setZero();
1023  for(InputIterator it(begin); it!=end; ++it)
1024  wi(IsRowMajor ? it->col() : it->row())++;
1025 
1026  // pass 2: insert all the elements into trMat
1027  trMat.reserve(wi);
1028  for(InputIterator it(begin); it!=end; ++it)
1029  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
1030 
1031  // pass 3:
1032  trMat.sumupDuplicates();
1033 
1034  // pass 4: transposed copy -> implicit sorting
1035  mat = trMat;
1036 }
1037 
1038 }
1039 
1040 
1078 template<typename Scalar, int _Options, typename _Index>
1079 template<typename InputIterators>
1080 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1081 {
1082  internal::set_from_triplets(begin, end, *this);
1083 }
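A typical way to drive setFromTriplets (an illustrative sketch; the sizes and values are arbitrary). Duplicate (row, column) pairs are accumulated, which is what sumupDuplicates() below implements:

  #include <vector>
  #include <Eigen/Sparse>

  std::vector<Eigen::Triplet<double> > triplets;
  triplets.push_back(Eigen::Triplet<double>(0, 0, 1.0));
  triplets.push_back(Eigen::Triplet<double>(1, 2, 3.0));
  triplets.push_back(Eigen::Triplet<double>(1, 2, 0.5));   // duplicate entry: summed into (1,2)

  Eigen::SparseMatrix<double> M(4, 4);
  M.setFromTriplets(triplets.begin(), triplets.end());     // M.coeff(1,2) == 3.5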
1084 
1086 template<typename Scalar, int _Options, typename _Index>
1087 void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
1088 {
1089  eigen_assert(!isCompressed());
1090  // TODO, in practice we should be able to use m_innerNonZeros for that task
1091  VectorXi wi(innerSize());
1092  wi.fill(-1);
1093  Index count = 0;
1094  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
1095  for(int j=0; j<outerSize(); ++j)
1096  {
1097  Index start = count;
1098  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1099  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1100  {
1101  Index i = m_data.index(k);
1102  if(wi(i)>=start)
1103  {
1104  // we have already met this entry => accumulate it
1105  m_data.value(wi(i)) += m_data.value(k);
1106  }
1107  else
1108  {
1109  m_data.value(count) = m_data.value(k);
1110  m_data.index(count) = m_data.index(k);
1111  wi(i) = count;
1112  ++count;
1113  }
1114  }
1115  m_outerIndex[j] = start;
1116  }
1117  m_outerIndex[m_outerSize] = count;
1118 
1119  // turn the matrix into compressed form
1120  delete[] m_innerNonZeros;
1121  m_innerNonZeros = 0;
1122  m_data.resize(m_outerIndex[m_outerSize]);
1123 }
1124 
1125 } // end namespace Eigen
1126 
1127 #endif // EIGEN_SPARSEMATRIX_H