Medial Code Documentation
Loading...
Searching...
No Matches
SparseSelfAdjointView.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
11#define EIGEN_SPARSE_SELFADJOINTVIEW_H
12
13namespace Eigen {
14
namespace internal {

// A SparseSelfAdjointView simply reuses the traits (Scalar, StorageIndex,
// flags, ...) of the wrapped matrix expression.
template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

// Copies the triangular half SrcMode of 'mat' into the triangular half
// DstMode of '_dest', optionally applying the permutation 'perm'
// (implemented at the bottom of this file).
template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

// Expands the triangular half Mode of 'mat' into a full sparse matrix '_dest'
// with both halves stored, optionally applying the permutation 'perm'
// (implemented at the bottom of this file).
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

}
// Pseudo-expression exposing the triangular half selected by _Mode (Upper or
// Lower) of a square sparse matrix as a full self-adjoint matrix.
// NOTE(review): several member declarations (enum entries, the operator*
// signatures, twistedBy, a few typedefs) were dropped by the doc extraction;
// consult upstream Eigen for the complete definition.
template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
  public:

    enum {
      Mode = _Mode,
      // NOTE(review): further enum entries missing in this extraction.
    };

    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef typename MatrixType::Nested MatrixTypeNested;

    // The view only makes sense for square matrices; checked at run time.
    explicit inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    // Access to the nested (wrapped) expression. The non-const overload uses
    // const_cast_derived() to strip the constness of the nested reference so
    // in-place algorithms can update the wrapped matrix.
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }

    // Sparse self-adjoint * sparse: returns a lazy product expression.
    // NOTE(review): the signature line is missing in this extraction.
    template<typename OtherDerived>
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    // NOTE(review): a friend operator* (sparse lhs * self-adjoint view) was
    // dropped by the doc extraction here.
    template<typename OtherDerived> friend

    // Sparse self-adjoint * dense vector/matrix.
    // NOTE(review): the signature line is missing in this extraction.
    template<typename OtherDerived>
    {
      return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
    }

    // Dense vector/matrix * sparse self-adjoint (friend so the dense operand
    // can appear on the left-hand side).
    template<typename OtherDerived> friend
    {
      return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    // Rank-K update restricted to the stored triangular half; see the
    // implementation below for the exact alpha semantics.
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

    // TODO implement twists in a more evaluator friendly fashion

    // Assignment from a permuted ("twisted") self-adjoint expression.
    // NOTE(review): the operator= signature line is missing in this extraction.
    template<typename SrcMatrixType,int SrcMode>
    {
      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
      return *this;
    }

    // Plain copy-assignment is routed through twistedBy() with an empty
    // permutation so the triangular-half conversion logic is reused.
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    // Same as above for a view with a different wrapped type and/or mode.
    template<typename SrcMatrixType,unsigned int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    // A view cannot actually be resized; only the current dimensions are
    // accepted (needed to satisfy the EigenBase interface).
    void resize(Index rows, Index cols)
    {
      EIGEN_ONLY_USED_FOR_DEBUG(rows);
      EIGEN_ONLY_USED_FOR_DEBUG(cols);
      eigen_assert(rows == this->rows() && cols == this->cols()
                && "SparseSelfadjointView::resize() does not actually allow to resize.");
    }

  protected:

    typename MatrixType::Nested m_matrix;  // possibly-by-value nested copy of the wrapped expression
    //mutable VectorI m_countPerRow;
    //mutable VectorI m_countPerCol;
  private:
    // Declared but intentionally not defined: disables the generic evalTo path.
    template<typename Dest> void evalTo(Dest &) const;
};
167
168/***************************************************************************
169* Implementation of SparseMatrixBase methods
170***************************************************************************/
171
172template<typename Derived>
173template<unsigned int UpLo>
174typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
175{
176 return SparseSelfAdjointView<const Derived, UpLo>(derived());
177}
178
179template<typename Derived>
180template<unsigned int UpLo>
181typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
182{
183 return SparseSelfAdjointView<Derived, UpLo>(derived());
184}
185
186/***************************************************************************
187* Implementation of SparseSelfAdjointView methods
188***************************************************************************/
189
190template<typename MatrixType, unsigned int Mode>
191template<typename DerivedU>
192SparseSelfAdjointView<MatrixType,Mode>&
193SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
194{
195 SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
196 if(alpha==Scalar(0))
197 m_matrix.const_cast_derived() = tmp.template triangularView<Mode>();
198 else
199 m_matrix.const_cast_derived() += alpha * tmp.template triangularView<Mode>();
200
201 return *this;
202}
203
namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
// NOTE(review): the struct head (an evaluator-traits specialization upstream)
// and some members were dropped by the doc extraction.
template<typename MatrixType, unsigned int Mode>
{

  // Self-adjoint views never alias their destination in this context.
  static const int AssumeAliasing = 0;
};

// Assignment of a SparseSelfAdjointView into a sparse destination.
// NOTE(review): the run() member signatures were dropped by the doc
// extraction; only the bodies remain visible below.
template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse, Scalar>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  // Destination is a plain SparseMatrix: expand the stored triangular half
  // into a full symmetric matrix directly into dst.
  template<typename DestScalar,int StorageOrder>
  {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
  }

  // Generic sparse destination: expand into a temporary first, then assign.
  template<typename DestScalar>
  {
    // TODO directly evaluate into dst;
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};

} // end namespace internal
244
245/***************************************************************************
246* Implementation of sparse self-adjoint time dense matrix
247***************************************************************************/
248
249namespace internal {
250
// Kernel computing res += (self-adjoint lhs) * rhs where only the triangular
// half selected by Mode of 'lhs' is stored. Each stored off-diagonal entry
// contributes to two rows of the result (symmetry), with the mirrored
// contribution conjugated.
template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);
  // TODO use alpha
  eigen_assert(alpha==AlphaType(1) && "alpha != 1 is not implemented yet, sorry");

  typedef evaluator<SparseLhsType> LhsEval;
  typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    // Whether the stored half is traversed before or after the diagonal
    // depends on both Mode and the storage order.
    // NOTE(review): the enum entry names (ProcessFirstHalf/ProcessSecondHalf
    // upstream) were dropped by the doc extraction.
      ((Mode&(Upper|Lower))==(Upper|Lower))
    || ( (Mode&Upper) && !LhsIsRowMajor)
    || ( (Mode&Lower) && LhsIsRowMajor),
  };

  LhsEval lhsEval(lhs);

  for (Index j=0; j<lhs.outerSize(); ++j)
  {
    LhsIterator i(lhsEval,j);
    if (ProcessSecondHalf)
    {
      // Skip entries strictly before the diagonal, then handle the diagonal
      // entry (if present) once: it contributes a single term.
      while (i && i.index()<j) ++i;
      if(i && i.index()==j)
      {
        res.row(j) += i.value() * rhs.row(j);
        ++i;
      }
    }
    // Off-diagonal entries: v goes into row a, conj(v) into the mirrored row b.
    for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
    {
      Index a = LhsIsRowMajor ? j : i.index();
      Index b = LhsIsRowMajor ? i.index() : j;
      LhsScalar v = i.value();
      res.row(a) += (v) * rhs.row(b);
      res.row(b) += numext::conj(v) * rhs.row(a);
    }
    // When the stored half precedes the diagonal, a diagonal entry (if any)
    // is the last stored entry of the inner vector.
    if (ProcessFirstHalf && i && (i.index()==j))
      res.row(j) += i.value() * rhs.row(j);
  }
}
297
298
// Product of a sparse self-adjoint view (lhs) by a dense operand (rhs).
// NOTE(review): the struct head (a generic_product_impl specialization
// upstream) was dropped by the doc extraction.
template<typename LhsView, typename Rhs, int ProductType>
{
  template<typename Dest>
  static void evalTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs)
  {
    typedef typename LhsView::_MatrixTypeNested Lhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    // Force evaluation of compound operands so the kernel iterates over
    // plain, already-evaluated objects.
    LhsNested lhsNested(lhsView.matrix());
    RhsNested rhsNested(rhs);

    // The kernel accumulates into dst, so it must start from zero; alpha is
    // fixed to 1 (enforced by an assert inside the kernel).
    dst.setZero();
    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, typename Dest::Scalar(1));
  }
};
315
// Product of a dense operand (lhs) by a sparse self-adjoint view (rhs),
// computed through transposition: (lhs*rhs)^T = rhs^T * lhs^T, which the same
// kernel handles.
// NOTE(review): the struct head and the declaration of 'dstT' were dropped by
// the doc extraction.
template<typename Lhs, typename RhsView, int ProductType>
{
  template<typename Dest>
  static void evalTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView)
  {
    typedef typename RhsView::_MatrixTypeNested Rhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhsView.matrix());

    dst.setZero();
    // transpose everything
    internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, typename Dest::Scalar(1));
  }
};
334
335// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
336// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
337
// Evaluator for (sparse self-adjoint view) * (sparse rhs): eagerly evaluates
// the view into a plain sparse matrix, computes the plain product into
// m_result, and exposes a plain evaluator over the result.
// NOTE(review): the struct head, some typedefs and part of the constructor
// were dropped by the doc extraction.
template<typename LhsView, typename Rhs, int ProductTag>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef typename XprType::PlainObject PlainObject;

    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    // Re-run the base-evaluator constructor in place, now bound to m_result.
    ::new (static_cast<Base*>(this)) Base(m_result);
  }

protected:
  typename Rhs::PlainObject m_lhs;  // plain evaluation of the self-adjoint lhs
  PlainObject m_result;             // the fully evaluated product
};
357
// Evaluator for (sparse lhs) * (sparse self-adjoint view): mirror of the
// previous specialization with the roles of lhs and rhs swapped.
// NOTE(review): the struct head, some typedefs and part of the constructor
// were dropped by the doc extraction.
template<typename Lhs, typename RhsView, int ProductTag>
  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
  typedef typename XprType::PlainObject PlainObject;

    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
  {
    // Re-run the base-evaluator constructor in place, now bound to m_result.
    ::new (static_cast<Base*>(this)) Base(m_result);
  }

protected:
  typename Lhs::PlainObject m_rhs;  // plain evaluation of the self-adjoint rhs
  PlainObject m_result;             // the fully evaluated product
};
377
378} // namespace internal
379
380/***************************************************************************
381* Implementation of symmetric copies and permutations
382***************************************************************************/
383namespace internal {
384
// Expands the triangular half selected by Mode of 'mat' into a full sparse
// matrix '_dest' storing both halves, applying the optional permutation
// 'perm' (dest(perm[i],perm[j]) <- mat(i,j)). Mirrored off-diagonal entries
// are conjugated (self-adjointness). Classic two-pass CSC/CSR construction:
// count, prefix-sum, then fill.
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  // NOTE(review): the 'Dest' typedef line was dropped by the doc extraction
  // (upstream it is the SparseMatrix type of '_dest').
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;

  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  // Pass 1: count the entries of each output inner vector.
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;  // full input: one entry each
      else if(r==c)
        count[ip]++;                           // diagonal: appears once
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;                           // off-diagonal: mirrored into
        count[jp]++;                           // two inner vectors
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  // Prefix-sum the counts into the outer index array...
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  // ...then reuse 'count' as per-inner-vector write cursors.
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // copy data
  // Pass 2: write inner indices and values.
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        // Swap so the pair addresses the output in its own storage order.
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        // Mirrored copy is conjugated to preserve self-adjointness.
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}
471
// Copies the triangular half _SrcMode of 'mat' into the triangular half
// _DstMode of '_dest', applying the optional permutation 'perm'
// (dest(perm[i],perm[j]) <- mat(i,j)); entries whose permuted position falls
// in the opposite half are conjugated. Two-pass construction like
// permute_symm_to_fullsymm above.
template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    // For a row-major operand the stored half is traversed as its transpose,
    // so the mode is flipped to reason in a storage-order-agnostic way.
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  // Pass 1: count entries per output inner vector, skipping input entries
  // outside the source half.
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      // The permuted entry is stored in whichever inner vector keeps it
      // inside the destination half.
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  // Prefix-sum the counts into the outer index array, then reuse 'count' as
  // per-inner-vector write cursors.
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // Pass 2: write inner indices and values.
  for(StorageIndex j = 0; j<size; ++j)
  {

    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      // An entry mirrored into the opposite half must be conjugated.
      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}
533
534}
535
536// TODO implement twists in a more evaluator friendly fashion
537
namespace internal {

// A SparseSymmetricPermutationProduct reuses the traits of the underlying
// matrix expression.
template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};

}
545
// Pseudo-expression representing a symmetric permutation P*A*P^-1 of a sparse
// self-adjoint matrix whose triangular half Mode is stored; it is evaluated
// by the Assignment specializations below.
// NOTE(review): the class head line, enum entries and several typedefs (Perm,
// NestedExpression upstream) were dropped by the doc extraction.
template<typename MatrixType,int Mode>
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
    };
  protected:
  public:
    typedef typename MatrixType::Nested MatrixTypeNested;

    // Keeps a (possibly by-value) nested copy of the matrix, but only a
    // reference to the permutation: 'perm' must outlive this expression.
    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    // Accessors used by the Assignment evaluators.
    const NestedExpression& matrix() const { return m_matrix; }
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;  // not owned; see constructor note above

};
579
namespace internal {

// Evaluates a SparseSymmetricPermutationProduct into a sparse destination.
// NOTE(review): the run() member signatures were dropped by the doc
// extraction; only the bodies remain visible below.
template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar>, Sparse2Sparse>
{
  typedef typename DstXprType::StorageIndex DstIndex;
  // Destination is a plain SparseMatrix: expand to a full symmetric matrix.
  template<int Options>
  {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    // The temporary deliberately uses the opposite storage order of the
    // destination; presumably the final 'dst = tmp' then produces properly
    // sorted inner vectors — TODO(review) confirm against upstream.
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

  // Destination is itself a self-adjoint view: copy triangular half to
  // triangular half directly.
  template<typename DestType,unsigned int DestMode>
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};

} // end namespace internal
604
605} // end namespace Eigen
606
607#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
Expression of the product of two arbitrary matrices or vectors.
Definition Product.h:111
Pseudo expression representing a solving operation.
Definition Solve.h:63
A versatile sparse matrix representation.
Definition SparseMatrix.h:94
Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
Definition SparseSelfAdjointView.h:45
friend Product< OtherDerived, SparseSelfAdjointView > operator*(const MatrixBase< OtherDerived > &lhs, const SparseSelfAdjointView &rhs)
Efficient dense vector/matrix times sparse self-adjoint matrix product.
Definition SparseSelfAdjointView.h:108
SparseSelfAdjointView & rankUpdate(const SparseMatrixBase< DerivedU > &u, const Scalar &alpha=Scalar(1))
Perform a symmetric rank K update of the selfadjoint matrix *this: where u is a vector or matrix.
Product< SparseSelfAdjointView, OtherDerived > operator*(const MatrixBase< OtherDerived > &rhs) const
Efficient sparse self-adjoint matrix times dense vector/matrix product.
Definition SparseSelfAdjointView.h:100
Product< SparseSelfAdjointView, OtherDerived > operator*(const SparseMatrixBase< OtherDerived > &rhs) const
Definition SparseSelfAdjointView.h:80
friend Product< OtherDerived, SparseSelfAdjointView > operator*(const SparseMatrixBase< OtherDerived > &lhs, const SparseSelfAdjointView &rhs)
Definition SparseSelfAdjointView.h:92
SparseSymmetricPermutationProduct< _MatrixTypeNested, Mode > twistedBy(const PermutationMatrix< Dynamic, Dynamic, StorageIndex > &perm) const
Definition SparseSelfAdjointView.h:126
Definition SparseSelfAdjointView.h:549
@ Lower
View matrix as a lower triangular matrix.
Definition Constants.h:204
@ Upper
View matrix as an upper triangular matrix.
Definition Constants.h:206
@ ColMajor
Storage order is column major (see TopicStorageOrders).
Definition Constants.h:320
@ RowMajor
Storage order is row major (see TopicStorageOrders).
Definition Constants.h:322
const unsigned int RowMajorBit
for a matrix, this means that the storage order is row-major.
Definition Constants.h:61
NLOHMANN_BASIC_JSON_TPL_DECLARATION void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL &j1, nlohmann::NLOHMANN_BASIC_JSON_TPL &j2) noexcept(//NOLINT(readability-inconsistent-declaration-parameter-name, cert-dcl58-cpp) is_nothrow_move_constructible< nlohmann::NLOHMANN_BASIC_JSON_TPL >::value &&//NOLINT(misc-redundant-expression) is_nothrow_move_assignable< nlohmann::NLOHMANN_BASIC_JSON_TPL >::value)
exchanges the values of two JSON objects
Definition json.hpp:24418
Definition Constants.h:511
Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor Matrix...
Definition EigenBase.h:29
Eigen::Index Index
The interface type of indices.
Definition EigenBase.h:37
Definition Constants.h:520
Definition AssignEvaluator.h:677
Definition AssignEvaluator.h:684
Definition Constants.h:525
Definition SparseAssign.h:61
Definition SparseSelfAdjointView.h:218
Definition SparseUtil.h:138
Definition AssignmentFunctors.h:21
Definition CoreEvaluators.h:75
Definition CoreEvaluators.h:82
Definition ProductEvaluators.h:81
Definition ForwardDeclarations.h:165
Definition ForwardDeclarations.h:17
Definition Meta.h:30
Definition inference.c:32