#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {
namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _Index StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = LvalueBit
  };
};
template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  enum { Flags = 0 };
};

} // end namespace internal
/** \class SparseMatrix
  *
  * \brief A versatile sparse matrix representation
  *
  * This class implements a more versatile variant of the widely-used
  * Compressed Column (or Row) Storage scheme.
  */
template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _Index> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    using Base::convert_index;
  public:
    using Base::isCompressed;
    using Base::nonZeros;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    using Base::operator+=;
    using Base::operator-=;

    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum { Options = _Options };
    typedef typename Base::IndexVector IndexVector;
    typedef typename Base::ScalarVector ScalarVector;

  protected:
    Index m_outerSize;
    Index m_innerSize;
    StorageIndex* m_outerIndex;
    StorageIndex* m_innerNonZeros;     // optional; when null the matrix is in compressed mode
    Storage m_data;

  public:
    /** \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    /** \returns the number of inner vectors (the number of rows for a column-major matrix) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of outer vectors (the number of columns for a column-major matrix) */
    inline Index outerSize() const { return m_outerSize; }
    /** \returns a const pointer to the array of values */
    inline const Scalar* valuePtr() const { return &m_data.value(0); }
    /** \returns a non-const pointer to the array of values */
    inline Scalar* valuePtr() { return &m_data.value(0); }

    /** \returns a const pointer to the array of inner indices */
    inline const StorageIndex* innerIndexPtr() const { return &m_data.index(0); }
    /** \returns a non-const pointer to the array of inner indices */
    inline StorageIndex* innerIndexPtr() { return &m_data.index(0); }

    /** \returns a const pointer to the array of the starting positions of the inner vectors */
    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
    /** \returns a non-const pointer to the array of the starting positions of the inner vectors */
    inline StorageIndex* outerIndexPtr() { return m_outerIndex; }

    /** \returns a const pointer to the array of the numbers of nonzeros of the inner vectors, or 0 in compressed mode */
    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
    inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }
    /** \returns the value of the matrix at position (\a row, \a col).
      * This function returns Scalar(0) if the element is an explicit \em zero. */
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
    }
    /** \returns a non-const reference to the value of the matrix at position (\a row, \a col).
      * If the element does not exist then it is inserted via the insert(Index,Index) function. */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }
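    /* Usage sketch (illustrative, not part of the original source): coeff() is a
     * pure read, coeffRef() inserts a zero entry on demand, and insert() asserts
     * that the entry does not exist yet.
     * \code
     * Eigen::SparseMatrix<double> A(10,10);
     * A.insert(2,3) = 4.0;          // creates the (2,3) entry
     * double v = A.coeff(2,3);      // reads it back (returns 0 for absent entries)
     * A.coeffRef(2,3) += 1.0;       // in-place update of an existing entry
     * \endcode
     */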
    /** Removes all nonzeros but keeps the allocated memory */
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
    }

    /** Preallocates \a reserveSize nonzeros.
      * Precondition: the matrix must be in compressed mode. */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }
    #ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSizes[\c j] nonzeros for each column (resp. row) \c j. */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
        typename
    #endif
        SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      this->reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerNonZeros to hold the new starting points
        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }
    }
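    /* Usage sketch (illustrative, not part of the original source): the public
     * reserve(const SizesType&) overload above forwards to reserveInnerVectors(),
     * taking one expected nonzero count per inner vector.
     * \code
     * Eigen::SparseMatrix<double> A(1000,1000);            // column-major by default
     * A.reserve(Eigen::VectorXi::Constant(1000, 6));       // ~6 nonzeros per column
     * for(int j=0; j<A.cols(); ++j)
     *   A.insert(j,j) = 1.0;                               // cheap: space is preallocated
     * \endcode
     */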
  public:

    /** \internal */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \internal */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }
    /** \internal */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    /** \internal */
    inline void finalize()
    {
      if(isCompressed())
      {
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        // find the last filled inner vector
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators, typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());

    /** \internal
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }
    /** Turns the matrix into the \em compressed format. */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }
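    /* Usage sketch (illustrative, not part of the original source): random
     * insertions leave the matrix in uncompressed mode; makeCompressed() packs
     * the data back into the plain CCS/CRS layout expected by most solvers.
     * \code
     * Eigen::SparseMatrix<double> A(100,100);
     * A.insert(3,4) = 1.0;       // A is now in uncompressed mode
     * A.makeCompressed();        // back to compressed storage
     * assert(A.isCompressed());
     * \endcode
     */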
    /** Turns the matrix into the uncompressed mode */
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }
    /** Suppresses all nonzeros which are \em much \em smaller \em than \a reference under the tolerance \a epsilon */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }
    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not
      * satisfy the predicate \a keep. The functor type \a KeepFunc must implement
      * bool operator()(const Index& row, const Index& col, const Scalar& value) const. */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
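    /* Usage sketch (illustrative, not part of the original source): both prune()
     * overloads leave the matrix compressed. The predicate form receives
     * (row, col, value) and keeps an entry when it returns true.
     * \code
     * Eigen::SparseMatrix<double> A(10,10);
     * A.insert(0,0) = 1e-20;
     * A.prune(1.0, 1e-12);   // drops entries much smaller than 1.0
     * A.prune([](const Eigen::Index&, const Eigen::Index&, const double& v)
     *         { return std::abs(v) > 1e-9; });   // custom predicate form
     * \endcode
     */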
    /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
      * If the sizes of the matrix are decreased, then the matrix is turned to uncompressed mode
      * and the storage of the out-of-bounds coefficients is kept and reserved.
      * Call makeCompressed() to pack the entries and squeeze extra memory. */
    void conservativeResize(Index rows, Index cols)
    {
      // No change
      if (this->rows() == rows && this->cols() == cols) return;

      // If one dimension is null, then there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

      // Deal with the inner nonzeros
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }

      // Update m_innerNonZeros in case of a decrease of the inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // Re-allocate the outer index structure if necessary
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }
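    /* Usage sketch (illustrative, not part of the original source): unlike
     * resize(), which discards all entries, conservativeResize() keeps the
     * existing entries that still fit into the new dimensions.
     * \code
     * Eigen::SparseMatrix<double> A(4,4);
     * A.insert(1,1) = 5.0;
     * A.conservativeResize(6,6);   // the (1,1) entry is preserved
     * A.resize(6,6);               // this, in contrast, clears the matrix
     * \endcode
     */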
    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero. */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }
    /** \internal */
    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }
    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }
    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
      if (needToTranspose)
        *this = other.derived();
      else
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        internal::call_assignment_no_alias(*this, other.derived());
      }
    }
    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }
    /** Copy constructor (it performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }
    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }
    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }
    /** Swaps the content of two sparse matrices of the same type.
      * This is a fast operation that simply swaps the underlying pointers and parameters. */
    inline void swap(SparseMatrix& other)
    {
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }
    /** Sets *this to the identity matrix.
      * This function also turns the matrix into compressed mode, and drops any reserved memory. */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<IndexVector>(innerIndexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
      Eigen::Map<IndexVector>(outerIndexPtr(), rows()+1).setLinSpaced(0, StorageIndex(rows()));
      Eigen::Map<ScalarVector>(valuePtr(), rows()).setOnes();
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }
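    /* Usage sketch (illustrative, not part of the original source):
     * \code
     * Eigen::SparseMatrix<double> I(5,5);   // must be square
     * I.setIdentity();                      // compressed, exactly 5 stored entries
     * \endcode
     */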
    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }
#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
#endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
        {
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        }
        else
        {
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k)
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            for (; k<m.m_outerIndex[i+1]; ++k)
              s << "(_,_) ";
          }
        }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i)
          s << m.m_outerIndex[i] << " ";
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i)
            s << m.m_innerNonZeros[i] << " ";
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }
    /** Destructor */
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

    /** Overloaded for performance */
    Scalar sum() const;

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif
  protected:
    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }
    /** \internal */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
    /** \internal
      * A vector object that is equal to 0 everywhere but \a v at the index \a i */
    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
    };
    /** \internal */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

  public:
    /** \internal */
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = 0);
    }
  private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }
    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};
namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the number of nonzeros per inner vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: merge duplicate entries
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

} // end namespace internal
/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
  * A \em triplet is a tuple (i,j,value) of the three coefficients of a nonzero entry.
  * Duplicated entries are summed. */
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}

/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied. */
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index>, DupFunctor>(begin, end, *this, dup_func);
}
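/* Usage sketch (illustrative, not part of the original source): the canonical
 * way to fill a sparse matrix. Duplicated (i,j) pairs are summed unless a
 * custom duplicate functor is supplied.
 * \code
 * typedef Eigen::Triplet<double> T;
 * std::vector<T> entries;
 * entries.push_back(T(0,0, 3.0));
 * entries.push_back(T(1,2,-1.0));
 * entries.push_back(T(1,2, 2.0));   // duplicate of (1,2): summed by default
 * Eigen::SparseMatrix<double> A(3,3);
 * A.setFromTriplets(entries.begin(), entries.end());
 * // with an explicit duplicate policy:
 * A.setFromTriplets(entries.begin(), entries.end(),
 *                   [](const double& a, const double& b) { return std::max(a,b); });
 * \endcode
 */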
/** \internal */
template<typename Scalar, int _Options, typename _Index>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_Index>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  // for each inner vector, wi[inner_index] holds the position of the first element in the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we already met this entry => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}
template<typename Scalar, int _Options, typename _Index>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two pass algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector>(dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1: count the number of coeffs per dest inner vector
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      Index tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2: fill the destination
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}
template<typename _Scalar, int _Options, typename _Index>
typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner vectors to the end of the pre-allocated space
      // and allocate the entire free space to the first inner vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner vectors to the end of the used space
    // and allocate the entire free space to the current inner vector
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated: move the remaining inner vectors back
      // to the end of the free space
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner vector is packed to the end
  // and the current inner vector's end matches the used space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation, as above
    if(data_end != m_data.allocatedSize())
    {
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = 0);
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible to random un-compressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}
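/* Usage sketch (illustrative, not part of the original source): direct
 * insertion as an alternative to setFromTriplets() when the number of
 * nonzeros per inner vector is known in advance; value(i,j) stands for a
 * hypothetical user-provided coefficient function.
 * \code
 * const int n = 100;
 * Eigen::SparseMatrix<double> A(n,n);
 * A.reserve(Eigen::VectorXi::Constant(n, 3));   // ~3 nonzeros per column
 * for(int j=0; j<n; ++j)
 *   for(int i=std::max(0,j-1); i<=std::min(n-1,j+1); ++i)
 *     A.insert(i,j) = value(i,j);
 * A.makeCompressed();
 * \endcode
 */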
template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}
template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // handle the tricky case where the outerIndex array starts with
  // [ 0 0 0 0 0 1 ...] and we are inserting into, e.g., the 2nd inner vector
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (size_t(m_outerIndex[outer+1]) == m_data.size());

  size_t startId = m_outerIndex[outer];
  size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate; to reduce multiple reallocations we use a smart
      // resize algorithm based on the current filling ratio, computed in double
      // precision to avoid integer overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // bound the realloc ratio to reduce minor reallocs when the matrix is
      // almost filled, and to avoid over-allocating when it is almost full
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops, wrong guess: let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vector: update the outer indices
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift the data of the last inner vectors
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}
namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H