SparseMatrix.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_SPARSEMATRIX_H
11#define EIGEN_SPARSEMATRIX_H
12
13namespace Eigen {
14
41namespace internal {
42template<typename _Scalar, int _Options, typename _Index>
43struct traits<SparseMatrix<_Scalar, _Options, _Index> >
44{
45 typedef _Scalar Scalar;
46 typedef _Index StorageIndex;
47 typedef Sparse StorageKind;
48 typedef MatrixXpr XprKind;
49 enum {
50 RowsAtCompileTime = Dynamic,
51 ColsAtCompileTime = Dynamic,
52 MaxRowsAtCompileTime = Dynamic,
53 MaxColsAtCompileTime = Dynamic,
54 Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
55 SupportedAccessPatterns = InnerRandomAccessPattern
56 };
57};
58
59template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
60struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
61{
 62 typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
 63 typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
 64 typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
65
66 typedef _Scalar Scalar;
67 typedef Dense StorageKind;
68 typedef _Index StorageIndex;
69 typedef MatrixXpr XprKind;
70
71 enum {
72 RowsAtCompileTime = Dynamic,
73 ColsAtCompileTime = 1,
74 MaxRowsAtCompileTime = Dynamic,
75 MaxColsAtCompileTime = 1,
76 Flags = LvalueBit
77 };
78};
79
80template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
81struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
82 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
83{
84 enum {
85 Flags = 0
86 };
87};
88
89} // end namespace internal
90
91template<typename _Scalar, int _Options, typename _Index>
 92class SparseMatrix
 93 : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _Index> >
94{
 95 typedef SparseCompressedBase<SparseMatrix> Base;
 96 using Base::convert_index;
97 public:
 98 using Base::isCompressed;
 99 using Base::nonZeros;
100 EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
101 using Base::operator+=;
102 using Base::operator-=;
103
107 typedef typename Base::InnerIterator InnerIterator;
109
110
111 using Base::IsRowMajor;
 112 typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
 113 enum {
114 Options = _Options
115 };
116
117 typedef typename Base::IndexVector IndexVector;
118 typedef typename Base::ScalarVector ScalarVector;
119 protected:
120 typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
121
122 Index m_outerSize;
123 Index m_innerSize;
124 StorageIndex* m_outerIndex;
125 StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
126 Storage m_data;
127
128 public:
129
131 inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
133 inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
134
136 inline Index innerSize() const { return m_innerSize; }
138 inline Index outerSize() const { return m_outerSize; }
139
143 inline const Scalar* valuePtr() const { return &m_data.value(0); }
147 inline Scalar* valuePtr() { return &m_data.value(0); }
148
152 inline const StorageIndex* innerIndexPtr() const { return &m_data.index(0); }
156 inline StorageIndex* innerIndexPtr() { return &m_data.index(0); }
157
161 inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
165 inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
166
170 inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
174 inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
175
177 inline Storage& data() { return m_data; }
179 inline const Storage& data() const { return m_data; }
180
183 inline Scalar coeff(Index row, Index col) const
184 {
185 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
186
187 const Index outer = IsRowMajor ? row : col;
188 const Index inner = IsRowMajor ? col : row;
189 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
190 return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
191 }
192
201 inline Scalar& coeffRef(Index row, Index col)
202 {
203 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
204
205 const Index outer = IsRowMajor ? row : col;
206 const Index inner = IsRowMajor ? col : row;
207
208 Index start = m_outerIndex[outer];
209 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
210 eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
211 if(end<=start)
212 return insert(row,col);
213 const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
214 if((p<end) && (m_data.index(p)==inner))
215 return m_data.value(p);
216 else
217 return insert(row,col);
218 }
219
235 Scalar& insert(Index row, Index col);
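
A minimal usage sketch of coeff(), coeffRef() and insert() shown above; the matrix size and values are illustrative. insert(i,j) assumes the entry does not exist yet, while coeffRef(i,j) falls back to insert(i,j) when the entry is missing:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(4, 4);   // column-major by default, starts empty and compressed
  A.insert(0, 1) = 3.0;                  // creates the entry (0,1)
  A.coeffRef(0, 1) += 1.0;               // updates it in place (would insert a zero if missing)
  std::cout << A.coeff(0, 1) << "\n";    // prints 4
  std::cout << A.coeff(2, 2) << "\n";    // prints 0: coeff() never inserts
}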
236
237 public:
238
246 inline void setZero()
247 {
248 m_data.clear();
249 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
250 if(m_innerNonZeros)
251 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
252 }
253
257 inline void reserve(Index reserveSize)
258 {
259 eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
260 m_data.reserve(reserveSize);
261 }
262
263 #ifdef EIGEN_PARSED_BY_DOXYGEN
276 template<class SizesType>
277 inline void reserve(const SizesType& reserveSizes);
278 #else
279 template<class SizesType>
280 inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
281 #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
282 typename
283 #endif
284 SizesType::value_type())
285 {
286 EIGEN_UNUSED_VARIABLE(enableif);
287 reserveInnerVectors(reserveSizes);
288 }
289 #endif // EIGEN_PARSED_BY_DOXYGEN
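
A sketch of the usual reserve-then-insert filling pattern built on the reserve(SizesType) overload above; the size and the estimate of three nonzeros per column are illustrative:

#include <Eigen/Sparse>

int main()
{
  const int n = 1000;
  Eigen::SparseMatrix<double> A(n, n);          // column-major
  A.reserve(Eigen::VectorXi::Constant(n, 3));   // room for about 3 nonzeros per column
  for (int j = 0; j < n; ++j)
  {
    A.insert(j, j) = 2.0;
    if (j + 1 < n) A.insert(j + 1, j) = -1.0;
  }
  A.makeCompressed();                           // drop the unused reserved room
}

Reserving a realistic number of nonzeros per inner vector keeps the subsequent insert() calls free of reallocations and element shuffling.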
290 protected:
291 template<class SizesType>
292 inline void reserveInnerVectors(const SizesType& reserveSizes)
293 {
294 if(isCompressed())
295 {
296 Index totalReserveSize = 0;
297 // turn the matrix into non-compressed mode
298 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
299 if (!m_innerNonZeros) internal::throw_std_bad_alloc();
300
301 // temporarily use m_innerSizes to hold the new starting points.
302 StorageIndex* newOuterIndex = m_innerNonZeros;
303
304 StorageIndex count = 0;
305 for(Index j=0; j<m_outerSize; ++j)
306 {
307 newOuterIndex[j] = count;
308 count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
309 totalReserveSize += reserveSizes[j];
310 }
311 m_data.reserve(totalReserveSize);
312 StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
313 for(Index j=m_outerSize-1; j>=0; --j)
314 {
315 StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
316 for(Index i=innerNNZ-1; i>=0; --i)
317 {
318 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
319 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
320 }
321 previousOuterIndex = m_outerIndex[j];
322 m_outerIndex[j] = newOuterIndex[j];
323 m_innerNonZeros[j] = innerNNZ;
324 }
325 m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
326
327 m_data.resize(m_outerIndex[m_outerSize]);
328 }
329 else
330 {
331 StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
332 if (!newOuterIndex) internal::throw_std_bad_alloc();
333
334 StorageIndex count = 0;
335 for(Index j=0; j<m_outerSize; ++j)
336 {
337 newOuterIndex[j] = count;
338 StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
339 StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
340 count += toReserve + m_innerNonZeros[j];
341 }
342 newOuterIndex[m_outerSize] = count;
343
344 m_data.resize(count);
345 for(Index j=m_outerSize-1; j>=0; --j)
346 {
347 Index offset = newOuterIndex[j] - m_outerIndex[j];
348 if(offset>0)
349 {
350 StorageIndex innerNNZ = m_innerNonZeros[j];
351 for(Index i=innerNNZ-1; i>=0; --i)
352 {
353 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
354 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
355 }
356 }
357 }
358
359 std::swap(m_outerIndex, newOuterIndex);
360 std::free(newOuterIndex);
361 }
362
363 }
364 public:
365
366 //--- low level purely coherent filling ---
367
378 inline Scalar& insertBack(Index row, Index col)
379 {
380 return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
381 }
382
385 inline Scalar& insertBackByOuterInner(Index outer, Index inner)
386 {
387 eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
388 eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
389 Index p = m_outerIndex[outer+1];
390 ++m_outerIndex[outer+1];
391 m_data.append(Scalar(0), inner);
392 return m_data.value(p);
393 }
394
397 inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
398 {
399 Index p = m_outerIndex[outer+1];
400 ++m_outerIndex[outer+1];
401 m_data.append(Scalar(0), inner);
402 return m_data.value(p);
403 }
404
407 inline void startVec(Index outer)
408 {
409 eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
410 eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
411 m_outerIndex[outer+1] = m_outerIndex[outer];
412 }
413
417 inline void finalize()
418 {
419 if(isCompressed())
420 {
421 StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
422 Index i = m_outerSize;
423 // find the last filled column
424 while (i>=0 && m_outerIndex[i]==0)
425 --i;
426 ++i;
427 while (i<=m_outerSize)
428 {
429 m_outerIndex[i] = size;
430 ++i;
431 }
432 }
433 }
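
A sketch of the low-level coherent filling path declared above (startVec(), insertBack(), finalize()), assuming a column-major matrix whose columns are filled in order with strictly increasing row indices:

#include <Eigen/Sparse>

int main()
{
  const int n = 5;
  Eigen::SparseMatrix<double> A(n, n);  // column-major: the outer dimension is the columns
  A.reserve(2 * n);
  for (int j = 0; j < A.outerSize(); ++j)
  {
    A.startVec(j);                      // outer vectors must be started sequentially
    A.insertBack(j, j) = 2.0;           // row indices must be strictly increasing per column
    if (j + 1 < n)
      A.insertBack(j + 1, j) = -1.0;
  }
  A.finalize();                         // fills the trailing outer index entries
}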
434
435 //---
436
437 template<typename InputIterators>
438 void setFromTriplets(const InputIterators& begin, const InputIterators& end);
439
440 template<typename InputIterators,typename DupFunctor>
 441 void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
 442
443 void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar>()); }
444
445 template<typename DupFunctor>
446 void collapseDuplicates(DupFunctor dup_func = DupFunctor());
447
448 //---
449
452 Scalar& insertByOuterInner(Index j, Index i)
453 {
454 return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
455 }
456
 459 void makeCompressed()
 460 {
461 if(isCompressed())
462 return;
463
464 eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
465
466 Index oldStart = m_outerIndex[1];
467 m_outerIndex[1] = m_innerNonZeros[0];
468 for(Index j=1; j<m_outerSize; ++j)
469 {
470 Index nextOldStart = m_outerIndex[j+1];
471 Index offset = oldStart - m_outerIndex[j];
472 if(offset>0)
473 {
474 for(Index k=0; k<m_innerNonZeros[j]; ++k)
475 {
476 m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
477 m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
478 }
479 }
480 m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
 481 oldStart = nextOldStart;
 482 }
483 std::free(m_innerNonZeros);
484 m_innerNonZeros = 0;
485 m_data.resize(m_outerIndex[m_outerSize]);
486 m_data.squeeze();
487 }
488
 490 void uncompress()
 491 {
492 if(m_innerNonZeros != 0)
493 return;
494 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
495 for (Index i = 0; i < m_outerSize; i++)
496 {
497 m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
498 }
499 }
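
A sketch of the compressed/uncompressed round trip implemented by makeCompressed() and uncompress():

#include <Eigen/Sparse>
#include <cassert>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  assert(A.isCompressed());    // a freshly resized matrix is compressed
  A.insert(0, 0) = 1.0;        // random insertion switches to the uncompressed mode
  assert(!A.isCompressed());
  A.makeCompressed();          // squeezes out the reserved holes: plain CSC/CSR storage
  assert(A.isCompressed());
  A.uncompress();              // re-creates the innerNonZeros array
  assert(!A.isCompressed());
}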
500
502 void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
503 {
504 prune(default_prunning_func(reference,epsilon));
505 }
506
514 template<typename KeepFunc>
515 void prune(const KeepFunc& keep = KeepFunc())
516 {
517 // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
 518 makeCompressed();
 519
520 StorageIndex k = 0;
521 for(Index j=0; j<m_outerSize; ++j)
522 {
523 Index previousStart = m_outerIndex[j];
524 m_outerIndex[j] = k;
525 Index end = m_outerIndex[j+1];
526 for(Index i=previousStart; i<end; ++i)
527 {
528 if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
529 {
530 m_data.value(k) = m_data.value(i);
531 m_data.index(k) = m_data.index(i);
532 ++k;
533 }
534 }
535 }
536 m_outerIndex[m_outerSize] = k;
537 m_data.resize(k,0);
538 }
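
A sketch of the two prune() overloads above; the threshold values are illustrative:

#include <Eigen/Sparse>
#include <cmath>

int main()
{
  Eigen::SparseMatrix<double> A(10, 10);
  A.insert(0, 0) = 1.0;
  A.insert(1, 0) = 1e-16;
  // Keep only entries that are not much smaller than the reference value 1.0:
  A.prune(1.0);
  // Or keep entries selected by an arbitrary (row, col, value) predicate:
  A.prune([](int, int, double v) { return std::abs(v) > 1e-12; });
  // Either call also leaves the matrix in compressed mode.
}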
539
543 void conservativeResize(Index rows, Index cols)
544 {
545 // No change
546 if (this->rows() == rows && this->cols() == cols) return;
547
548 // If one dimension is null, then there is nothing to be preserved
549 if(rows==0 || cols==0) return resize(rows,cols);
550
551 Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
552 Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
553 StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
554
555 // Deals with inner non zeros
556 if (m_innerNonZeros)
557 {
558 // Resize m_innerNonZeros
559 StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
560 if (!newInnerNonZeros) internal::throw_std_bad_alloc();
561 m_innerNonZeros = newInnerNonZeros;
562
563 for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
564 m_innerNonZeros[i] = 0;
565 }
566 else if (innerChange < 0)
567 {
568 // Inner size decreased: allocate a new m_innerNonZeros
569 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
570 if (!m_innerNonZeros) internal::throw_std_bad_alloc();
571 for(Index i = 0; i < m_outerSize; i++)
572 m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
573 }
574
575 // Change the m_innerNonZeros in case of a decrease of inner size
576 if (m_innerNonZeros && innerChange < 0)
577 {
578 for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
579 {
580 StorageIndex &n = m_innerNonZeros[i];
581 StorageIndex start = m_outerIndex[i];
582 while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
583 }
584 }
585
586 m_innerSize = newInnerSize;
587
588 // Re-allocate outer index structure if necessary
589 if (outerChange == 0)
590 return;
591
592 StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
593 if (!newOuterIndex) internal::throw_std_bad_alloc();
594 m_outerIndex = newOuterIndex;
595 if (outerChange > 0)
596 {
597 StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
598 for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
599 m_outerIndex[i] = last;
600 }
601 m_outerSize += outerChange;
602 }
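
A sketch contrasting conservativeResize(), which keeps the stored values, with resize(), which discards them; sizes and the stored value are illustrative:

#include <Eigen/Sparse>
#include <cassert>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(2, 2) = 5.0;
  A.makeCompressed();

  A.conservativeResize(5, 5);      // grow: existing nonzeros are preserved
  assert(A.coeff(2, 2) == 5.0);

  A.resize(2, 2);                  // resize() zeroes the matrix
  assert(A.nonZeros() == 0);
}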
603
611 void resize(Index rows, Index cols)
612 {
613 const Index outerSize = IsRowMajor ? rows : cols;
614 m_innerSize = IsRowMajor ? cols : rows;
615 m_data.clear();
616 if (m_outerSize != outerSize || m_outerSize==0)
617 {
618 std::free(m_outerIndex);
619 m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
620 if (!m_outerIndex) internal::throw_std_bad_alloc();
621
622 m_outerSize = outerSize;
623 }
624 if(m_innerNonZeros)
625 {
626 std::free(m_innerNonZeros);
627 m_innerNonZeros = 0;
628 }
629 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
630 }
631
634 void resizeNonZeros(Index size)
635 {
636 m_data.resize(size);
637 }
638
641
647
 649 inline SparseMatrix()
 650 : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
651 {
652 check_template_parameters();
653 resize(0, 0);
654 }
655
657 inline SparseMatrix(Index rows, Index cols)
658 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
659 {
660 check_template_parameters();
661 resize(rows, cols);
662 }
663
665 template<typename OtherDerived>
 666 inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
 667 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
668 {
 669 EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
 670 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
671 check_template_parameters();
 672 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
 673 if (needToTranspose)
674 *this = other.derived();
675 else
676 {
677 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
 678 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
 679 #endif
680 internal::call_assignment_no_alias(*this, other.derived());
681 }
682 }
683
685 template<typename OtherDerived, unsigned int UpLo>
 686 SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
 687 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
688 {
689 check_template_parameters();
690 Base::operator=(other);
691 }
692
694 inline SparseMatrix(const SparseMatrix& other)
695 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
696 {
697 check_template_parameters();
698 *this = other.derived();
699 }
700
702 template<typename OtherDerived>
 703 SparseMatrix(const ReturnByValue<OtherDerived>& other)
 704 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
705 {
706 check_template_parameters();
707 initAssignment(other);
708 other.evalTo(*this);
709 }
710
712 template<typename OtherDerived>
 713 SparseMatrix(const DiagonalBase<OtherDerived>& other)
 714 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
715 {
716 check_template_parameters();
717 *this = other.derived();
718 }
719
722 inline void swap(SparseMatrix& other)
723 {
724 //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
725 std::swap(m_outerIndex, other.m_outerIndex);
726 std::swap(m_innerSize, other.m_innerSize);
727 std::swap(m_outerSize, other.m_outerSize);
728 std::swap(m_innerNonZeros, other.m_innerNonZeros);
729 m_data.swap(other.m_data);
730 }
731
734 inline void setIdentity()
735 {
736 eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
737 this->m_data.resize(rows());
738 Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, StorageIndex(rows()-1));
739 Eigen::Map<ScalarVector>(&this->m_data.value(0), rows()).setOnes();
740 Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
741 std::free(m_innerNonZeros);
742 m_innerNonZeros = 0;
743 }
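
A sketch of setIdentity() on a square matrix:

#include <Eigen/Sparse>
#include <cassert>

int main()
{
  Eigen::SparseMatrix<double> I(4, 4);  // must be square
  I.setIdentity();
  assert(I.nonZeros() == 4);            // one stored entry per diagonal element
  assert(I.coeff(2, 2) == 1.0);
}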
744 inline SparseMatrix& operator=(const SparseMatrix& other)
745 {
746 if (other.isRValue())
747 {
748 swap(other.const_cast_derived());
749 }
750 else if(this!=&other)
751 {
752 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
753 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
754 #endif
755 initAssignment(other);
756 if(other.isCompressed())
757 {
758 internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
759 m_data = other.m_data;
760 }
761 else
762 {
763 Base::operator=(other);
764 }
765 }
766 return *this;
767 }
768
769#ifndef EIGEN_PARSED_BY_DOXYGEN
770 template<typename OtherDerived>
771 inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
772 { return Base::operator=(other.derived()); }
773#endif // EIGEN_PARSED_BY_DOXYGEN
774
775 template<typename OtherDerived>
776 EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
777
778 friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
779 {
780 EIGEN_DBG_SPARSE(
781 s << "Nonzero entries:\n";
782 if(m.isCompressed())
783 for (Index i=0; i<m.nonZeros(); ++i)
784 s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
785 else
786 for (Index i=0; i<m.outerSize(); ++i)
787 {
788 Index p = m.m_outerIndex[i];
789 Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
790 Index k=p;
791 for (; k<pe; ++k)
792 s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
793 for (; k<m.m_outerIndex[i+1]; ++k)
794 s << "(_,_) ";
795 }
796 s << std::endl;
797 s << std::endl;
798 s << "Outer pointers:\n";
799 for (Index i=0; i<m.outerSize(); ++i)
800 s << m.m_outerIndex[i] << " ";
801 s << " $" << std::endl;
802 if(!m.isCompressed())
803 {
804 s << "Inner non zeros:\n";
805 for (Index i=0; i<m.outerSize(); ++i)
806 s << m.m_innerNonZeros[i] << " ";
807 s << " $" << std::endl;
808 }
809 s << std::endl;
810 );
811 s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
812 return s;
813 }
814
 816 ~SparseMatrix()
 817 {
818 std::free(m_outerIndex);
819 std::free(m_innerNonZeros);
820 }
821
823 Scalar sum() const;
824
825# ifdef EIGEN_SPARSEMATRIX_PLUGIN
826# include EIGEN_SPARSEMATRIX_PLUGIN
827# endif
828
829protected:
830
831 template<typename Other>
832 void initAssignment(const Other& other)
833 {
834 resize(other.rows(), other.cols());
835 if(m_innerNonZeros)
836 {
837 std::free(m_innerNonZeros);
838 m_innerNonZeros = 0;
839 }
840 }
841
844 EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
845
 848 class SingletonVector
 849 {
850 StorageIndex m_index;
851 StorageIndex m_value;
852 public:
853 typedef StorageIndex value_type;
854 SingletonVector(Index i, Index v)
855 : m_index(convert_index(i)), m_value(convert_index(v))
856 {}
857
858 StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
859 };
860
863 EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
864
865public:
868 EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
869 {
870 const Index outer = IsRowMajor ? row : col;
871 const Index inner = IsRowMajor ? col : row;
872
873 eigen_assert(!isCompressed());
874 eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
875
876 Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
877 m_data.index(p) = convert_index(inner);
878 return (m_data.value(p) = 0);
879 }
880
881private:
882 static void check_template_parameters()
883 {
884 EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
885 EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
886 }
887
888 struct default_prunning_func {
889 default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
890 inline bool operator() (const Index&, const Index&, const Scalar& value) const
891 {
892 return !internal::isMuchSmallerThan(value, reference, epsilon);
893 }
894 Scalar reference;
895 RealScalar epsilon;
896 };
897};
898
899namespace internal {
900
901template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
902void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
903{
904 enum { IsRowMajor = SparseMatrixType::IsRowMajor };
905 typedef typename SparseMatrixType::Scalar Scalar;
906 typedef typename SparseMatrixType::StorageIndex StorageIndex;
907 SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
908
909 if(begin!=end)
910 {
911 // pass 1: count the nnz per inner-vector
912 typename SparseMatrixType::IndexVector wi(trMat.outerSize());
913 wi.setZero();
914 for(InputIterator it(begin); it!=end; ++it)
915 {
916 eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
917 wi(IsRowMajor ? it->col() : it->row())++;
918 }
919
920 // pass 2: insert all the elements into trMat
921 trMat.reserve(wi);
922 for(InputIterator it(begin); it!=end; ++it)
923 trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
924
925 // pass 3:
926 trMat.collapseDuplicates(dup_func);
927 }
928
929 // pass 4: transposed copy -> implicit sorting
930 mat = trMat;
931}
932
933}
934
935
973template<typename Scalar, int _Options, typename _Index>
974template<typename InputIterators>
 975void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
 976{
977 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar>());
978}
979
989template<typename Scalar, int _Options, typename _Index>
990template<typename InputIterators,typename DupFunctor>
 991void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
 992{
993 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index>, DupFunctor>(begin, end, *this, dup_func);
994}
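
A usage sketch of the two setFromTriplets() overloads above; indices and values are illustrative, and the duplicate entry shows the default summing behaviour as well as a custom combiner:

#include <Eigen/Sparse>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.push_back(T(0, 0, 1.0));
  triplets.push_back(T(1, 2, 2.0));
  triplets.push_back(T(1, 2, 3.0));     // duplicate entry: summed by default

  Eigen::SparseMatrix<double> A(3, 3);
  A.setFromTriplets(triplets.begin(), triplets.end());
  // A.coeff(1, 2) == 5.0, and A is left in compressed mode.

  // The DupFunctor overload combines duplicates differently, e.g. keeping the last value:
  A.setFromTriplets(triplets.begin(), triplets.end(),
                    [](const double&, const double& b) { return b; });
  // A.coeff(1, 2) == 3.0
}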
995
997template<typename Scalar, int _Options, typename _Index>
998template<typename DupFunctor>
 999void SparseMatrix<Scalar,_Options,_Index>::collapseDuplicates(DupFunctor dup_func)
 1000{
1001 eigen_assert(!isCompressed());
1002 // TODO, in practice we should be able to use m_innerNonZeros for that task
1003 IndexVector wi(innerSize());
1004 wi.fill(-1);
1005 StorageIndex count = 0;
1006 // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1007 for(Index j=0; j<outerSize(); ++j)
1008 {
1009 StorageIndex start = count;
1010 Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1011 for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1012 {
1013 Index i = m_data.index(k);
1014 if(wi(i)>=start)
1015 {
1016 // we already meet this entry => accumulate it
1017 m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1018 }
1019 else
1020 {
1021 m_data.value(count) = m_data.value(k);
1022 m_data.index(count) = m_data.index(k);
1023 wi(i) = count;
1024 ++count;
1025 }
1026 }
1027 m_outerIndex[j] = start;
1028 }
1029 m_outerIndex[m_outerSize] = count;
1030
1031 // turn the matrix into compressed form
1032 std::free(m_innerNonZeros);
1033 m_innerNonZeros = 0;
1034 m_data.resize(m_outerIndex[m_outerSize]);
1035}
1036
1037template<typename Scalar, int _Options, typename _Index>
1038template<typename OtherDerived>
1039EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
1040{
1041 EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1042 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1043
1044 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1045 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1046 #endif
1047
1048 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1049 if (needToTranspose)
1050 {
1051 #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1052 EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1053 #endif
1054 // two passes algorithm:
1055 // 1 - compute the number of coeffs per dest inner vector
1056 // 2 - do the actual copy/eval
1057 // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1058 typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1059 typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1060 typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1061 OtherCopy otherCopy(other.derived());
1062 OtherCopyEval otherCopyEval(otherCopy);
1063
1064 SparseMatrix dest(other.rows(),other.cols());
1065 Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1066
1067 // pass 1
1068 // FIXME the above copy could be merged with that pass
1069 for (Index j=0; j<otherCopy.outerSize(); ++j)
1070 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1071 ++dest.m_outerIndex[it.index()];
1072
1073 // prefix sum
1074 StorageIndex count = 0;
1075 IndexVector positions(dest.outerSize());
1076 for (Index j=0; j<dest.outerSize(); ++j)
1077 {
1078 Index tmp = dest.m_outerIndex[j];
1079 dest.m_outerIndex[j] = count;
1080 positions[j] = count;
1081 count += tmp;
1082 }
1083 dest.m_outerIndex[dest.outerSize()] = count;
1084 // alloc
1085 dest.m_data.resize(count);
1086 // pass 2
1087 for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1088 {
1089 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1090 {
1091 Index pos = positions[it.index()]++;
1092 dest.m_data.index(pos) = j;
1093 dest.m_data.value(pos) = it.value();
1094 }
1095 }
1096 this->swap(dest);
1097 return *this;
1098 }
1099 else
1100 {
1101 if(other.isRValue())
1102 {
1103 initAssignment(other.derived());
1104 }
1105 // there is no special optimization
1106 return Base::operator=(other.derived());
1107 }
1108}
1109
1110template<typename _Scalar, int _Options, typename _Index>
1111typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insert(Index row, Index col)
1112{
1113 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1114
1115 const Index outer = IsRowMajor ? row : col;
1116 const Index inner = IsRowMajor ? col : row;
1117
1118 if(isCompressed())
1119 {
1120 if(nonZeros()==0)
1121 {
1122 // reserve space if not already done
1123 if(m_data.allocatedSize()==0)
1124 m_data.reserve(2*m_innerSize);
1125
1126 // turn the matrix into non-compressed mode
1127 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1128 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1129
1130 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1131
1132 // pack all inner-vectors to the end of the pre-allocated space
1133 // and allocate the entire free-space to the first inner-vector
1134 StorageIndex end = convert_index(m_data.allocatedSize());
1135 for(Index j=1; j<=m_outerSize; ++j)
1136 m_outerIndex[j] = end;
1137 }
1138 else
1139 {
1140 // turn the matrix into non-compressed mode
1141 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1142 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1143 for(Index j=0; j<m_outerSize; ++j)
1144 m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1145 }
1146 }
1147
1148 // check whether we can do a fast "push back" insertion
1149 Index data_end = m_data.allocatedSize();
1150
1151 // First case: we are filling a new inner vector which is packed at the end.
1152 // We assume that all remaining inner-vectors are also empty and packed to the end.
1153 if(m_outerIndex[outer]==data_end)
1154 {
1155 eigen_internal_assert(m_innerNonZeros[outer]==0);
1156
1157 // pack previous empty inner-vectors to end of the used-space
1158 // and allocate the entire free-space to the current inner-vector.
1159 StorageIndex p = convert_index(m_data.size());
1160 Index j = outer;
1161 while(j>=0 && m_innerNonZeros[j]==0)
1162 m_outerIndex[j--] = p;
1163
1164 // push back the new element
1165 ++m_innerNonZeros[outer];
1166 m_data.append(Scalar(0), inner);
1167
1168 // check for reallocation
1169 if(data_end != m_data.allocatedSize())
1170 {
1171 // m_data has been reallocated
1172 // -> move remaining inner-vectors back to the end of the free-space
1173 // so that the entire free-space is allocated to the current inner-vector.
1174 eigen_internal_assert(data_end < m_data.allocatedSize());
1175 StorageIndex new_end = convert_index(m_data.allocatedSize());
1176 for(Index k=outer+1; k<=m_outerSize; ++k)
1177 if(m_outerIndex[k]==data_end)
1178 m_outerIndex[k] = new_end;
1179 }
1180 return m_data.value(p);
1181 }
1182
1183 // Second case: the next inner-vector is packed to the end
1184 // and the current inner-vector end match the used-space.
1185 if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1186 {
1187 eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1188
1189 // add space for the new element
1190 ++m_innerNonZeros[outer];
1191 m_data.resize(m_data.size()+1);
1192
1193 // check for reallocation
1194 if(data_end != m_data.allocatedSize())
1195 {
1196 // m_data has been reallocated
1197 // -> move remaining inner-vectors back to the end of the free-space
1198 // so that the entire free-space is allocated to the current inner-vector.
1199 eigen_internal_assert(data_end < m_data.allocatedSize());
1200 StorageIndex new_end = convert_index(m_data.allocatedSize());
1201 for(Index k=outer+1; k<=m_outerSize; ++k)
1202 if(m_outerIndex[k]==data_end)
1203 m_outerIndex[k] = new_end;
1204 }
1205
1206 // and insert it at the right position (sorted insertion)
1207 Index startId = m_outerIndex[outer];
1208 Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1209 while ( (p > startId) && (m_data.index(p-1) > inner) )
1210 {
1211 m_data.index(p) = m_data.index(p-1);
1212 m_data.value(p) = m_data.value(p-1);
1213 --p;
1214 }
1215
1216 m_data.index(p) = convert_index(inner);
1217 return (m_data.value(p) = 0);
1218 }
1219
1220 if(m_data.size() != m_data.allocatedSize())
1221 {
1222 // make sure the matrix is compatible to random un-compressed insertion:
1223 m_data.resize(m_data.allocatedSize());
1224 this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1225 }
1226
1227 return insertUncompressed(row,col);
1228}
1229
1230template<typename _Scalar, int _Options, typename _Index>
1231EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
1232{
1233 eigen_assert(!isCompressed());
1234
1235 const Index outer = IsRowMajor ? row : col;
1236 const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1237
1238 Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1239 StorageIndex innerNNZ = m_innerNonZeros[outer];
1240 if(innerNNZ>=room)
1241 {
1242 // this inner vector is full, we need to reallocate the whole buffer :(
1243 reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1244 }
1245
1246 Index startId = m_outerIndex[outer];
1247 Index p = startId + m_innerNonZeros[outer];
1248 while ( (p > startId) && (m_data.index(p-1) > inner) )
1249 {
1250 m_data.index(p) = m_data.index(p-1);
1251 m_data.value(p) = m_data.value(p-1);
1252 --p;
1253 }
1254 eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1255
1256 m_innerNonZeros[outer]++;
1257
1258 m_data.index(p) = inner;
1259 return (m_data.value(p) = 0);
1260}
1261
1262template<typename _Scalar, int _Options, typename _Index>
1263EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
1264{
1265 eigen_assert(isCompressed());
1266
1267 const Index outer = IsRowMajor ? row : col;
1268 const Index inner = IsRowMajor ? col : row;
1269
1270 Index previousOuter = outer;
1271 if (m_outerIndex[outer+1]==0)
1272 {
1273 // we start a new inner vector
1274 while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1275 {
1276 m_outerIndex[previousOuter] = convert_index(m_data.size());
1277 --previousOuter;
1278 }
1279 m_outerIndex[outer+1] = m_outerIndex[outer];
1280 }
1281
1282 // here we have to handle the tricky case where the outerIndex array
1283 // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
1284 // the 2nd inner vector...
1285 bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1286 && (size_t(m_outerIndex[outer+1]) == m_data.size());
1287
1288 size_t startId = m_outerIndex[outer];
1289 // FIXME let's make sure sizeof(long int) == sizeof(size_t)
1290 size_t p = m_outerIndex[outer+1];
1291 ++m_outerIndex[outer+1];
1292
1293 double reallocRatio = 1;
1294 if (m_data.allocatedSize()<=m_data.size())
1295 {
1296 // if there is no preallocated memory, let's reserve a minimum of 32 elements
1297 if (m_data.size()==0)
1298 {
1299 m_data.reserve(32);
1300 }
1301 else
1302 {
1303 // we need to reallocate the data, to reduce multiple reallocations
1304 // we use a smart resize algorithm based on the current filling ratio
1305 // in addition, we use double to avoid integers overflows
1306 double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1307 reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1308 // furthermore we bound the realloc ratio to:
1309 // 1) reduce multiple minor realloc when the matrix is almost filled
1310 // 2) avoid to allocate too much memory when the matrix is almost empty
1311 reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1312 }
1313 }
1314 m_data.resize(m_data.size()+1,reallocRatio);
1315
1316 if (!isLastVec)
1317 {
1318 if (previousOuter==-1)
1319 {
1320 // oops wrong guess.
1321 // let's correct the outer offsets
1322 for (Index k=0; k<=(outer+1); ++k)
1323 m_outerIndex[k] = 0;
1324 Index k=outer+1;
1325 while(m_outerIndex[k]==0)
1326 m_outerIndex[k++] = 1;
1327 while (k<=m_outerSize && m_outerIndex[k]!=0)
1328 m_outerIndex[k++]++;
1329 p = 0;
1330 --k;
1331 k = m_outerIndex[k]-1;
1332 while (k>0)
1333 {
1334 m_data.index(k) = m_data.index(k-1);
1335 m_data.value(k) = m_data.value(k-1);
1336 k--;
1337 }
1338 }
1339 else
1340 {
1341 // we are not inserting into the last inner vec
1342 // update outer indices:
1343 Index j = outer+2;
1344 while (j<=m_outerSize && m_outerIndex[j]!=0)
1345 m_outerIndex[j++]++;
1346 --j;
1347 // shift data of last vecs:
1348 Index k = m_outerIndex[j]-1;
1349 while (k>=Index(p))
1350 {
1351 m_data.index(k) = m_data.index(k-1);
1352 m_data.value(k) = m_data.value(k-1);
1353 k--;
1354 }
1355 }
1356 }
1357
1358 while ( (p > startId) && (m_data.index(p-1) > inner) )
1359 {
1360 m_data.index(p) = m_data.index(p-1);
1361 m_data.value(p) = m_data.value(p-1);
1362 --p;
1363 }
1364
1365 m_data.index(p) = inner;
1366 return (m_data.value(p) = 0);
1367}
1368
1369namespace internal {
1370
1371template<typename _Scalar, int _Options, typename _Index>
1372struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
1373 : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > >
1374{
 1375 typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > > Base;
 1376 typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
 1377 evaluator() : Base() {}
1378 explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1379};
1380
1381}
1382
1383} // end namespace Eigen
1384
1385#endif // EIGEN_SPARSEMATRIX_H