diff --git a/doc/LOWRANKBRANCH-BACKPORT-CHANGES b/doc/LOWRANKBRANCH-BACKPORT-CHANGES new file mode 100644 index 0000000000000000000000000000000000000000..6f002424cf22e0d31f824c41b46be176eb1373ad --- /dev/null +++ b/doc/LOWRANKBRANCH-BACKPORT-CHANGES @@ -0,0 +1,106 @@ +CHANGES REQUIRING CODE ADAPTATION +--------------------------------- +--------------------------------- + +* file dune/solvers/norms/diagnorm.hh : class DiagNorm + ° introduced template parameters in order to use it with arbitrary matrix blocks + \tparam Diagonal some container type having the random access operator[]; used to store the diagonal entries of the matrix + \tparam VectorType the vector type the norm may be applied to + ° old implementation remains as template specialization +--------------------------------------------------------------------------------------- +! ° REQUIRED CODE CHANGES: where "DiagNorm" was used, now "DiagNorm<>" is required ! +--------------------------------------------------------------------------------------- + +* file dune/solvers/norms/fullnorm.hh : class FullNorm + ° introduced template parameters in order to use it with arbitrary matrix blocks + \tparam LowRankFactor the type of the factor used to represent the low rank operator + \tparam VectorType the vector type the norm may be applied to + ° old implementation remains as template specialization +--------------------------------------------------------------------------------------- +! ° REQUIRED CODE CHANGES: where "FullNorm" was used, now "FullNorm<>" is required ! +--------------------------------------------------------------------------------------- + + +CHANGES POSSIBLY AFFECTING PERFORMANCE (INCLUDING BUGFIXES) +----------------------------------------------------------- +----------------------------------------------------------- + +* file dune/solvers/common/staticmatrixtools.hh + ° bugfix for addToDiagonal concerning template specialization + +* file dune/solvers/norms/energynorm.hh + ° catch slightly negative values of normSquared and return zero instead + ° "negativity tolerance" may be set in constructor; default is 1e-10 + +EXTENSION OF FUNCTIONALITY +-------------------------- +-------------------------- + +* file dune/solvers/transferoperators/compressedmultigridtransfer.hh : class CompressedMultigridTransfer + ° introduced template specialization for matrices of SumOperator<SparseMatrixType, LowRankMatrixType>-type + +* file dune/solvers/common/staticmatrixtools.hh + ° added some type promotions + (° bugfix for addToDiagonal concerning template specialization) + +* file dune/solvers/common/genericvectortools.hh : + ° added template specialization of method static void resize for VectorTypeB=SumOperator<...> + +* file dune/solvers/iterationsteps/truncatedblockgsstep.hh : + ° introduced template specialization for matrices of SumOperator<SparseMatrixType, LowRankMatrixType>-type + +* file dune/solvers/operators/nulloperator.hh : + ° introduced method umtv + ° introduced method usmtv + ° export field_type and size_type + ° introduced methods M(), N() + ° introduced operator*= + ° introduced operator[] + ° introduced operator[] in RowDummy + +* file dune/solvers/operators/lowrankoperator.hh : + ° lowRankFactor_ now stored as a pointer + ° introduced default constructor + ° introduced method mmv + (° renamed method getLowRankFactor->lowrankFactor) + ° added nonconst getter lowrankFactor + +* file dune/solvers/operators/sumoperator.hh : + ° summands now stored as pointers + ° introduced default constructor + ° introduced 
destructor + ° introduced method mmv + ° introduced methods M(), N() + (° renamed method getSOMETHINGMatrix->SOMETHINGMatrix) + ° added nonconst getters + +FURTHER CHANGES +------------------------------------- +------------------------------------- + +* copied file dune/solvers/common/arithmetic.hh from dune-fufem + +* file dune/solvers/transferoperators/truncatedcompressedmgtransfer.cc + ° harmonized template parameter names with base class + +* file dune/solvers/test/sumoperatortest.cc + ° extended test, now also testing + default construction + getter methods + consistency of N() + +* file dune/solvers/test/lowrankoperatortest.cc + ° extended test, now also testing + method mmv + for fixed block sizes 1...4 + +* use size_t instead of int in order to avoid signed/unsigned mismatch warnings + ° file dune/solvers/iterationsteps/truncatedblockgsstep.hh + ° file dune/solvers/iterationsteps/multigridstep.hh + ° file dune/solvers/norms/fullnorm.hh + ° file dune/solvers/norms/diagnorm.hh + ° file dune/solvers/common/staticmatrixtools.hh + +* file dune/solvers/solvers/loopsolver.cc: initialize value in order to avoid warning + +* fixed some typos/white space issues diff --git a/dune/solvers/common/arithmetic.hh b/dune/solvers/common/arithmetic.hh new file mode 100644 index 0000000000000000000000000000000000000000..bc05477d7598c2ed528878a595f4b911d6d844c5 --- /dev/null +++ b/dune/solvers/common/arithmetic.hh @@ -0,0 +1,203 @@ +#ifndef ARITHMETIC_HH +#define ARITHMETIC_HH + + +#include <dune/common/fvector.hh> +#include <dune/common/fmatrix.hh> +#include <dune/istl/diagonalmatrix.hh> +#include <dune/istl/scaledidmatrix.hh> + +/** \brief Namespace containing helper classes and functions for arithmetic operations + * + * Everything in this namespace is experimental and might change + * in the near future. Especially the naming of namespace, structs, + * and functions is not final. + */ +namespace Arithmetic +{ + + /** \brief Class to identify scalar types + * + * Specialize this class for all type that can be used + * like scalar quantities. + */ + template<class T> + struct ScalarTraits + { + enum { isScalar=false }; + }; + + template<> + struct ScalarTraits<float> + { + enum { isScalar=true }; + }; + + template<> + struct ScalarTraits<double> + { + enum { isScalar=true }; + }; + + template<class T> + struct ScalarTraits<Dune::FieldVector<T,1> > + { + enum { isScalar=true }; + }; + + template<class T> + struct ScalarTraits<Dune::FieldMatrix<T,1,1> > + { + enum { isScalar=true }; + }; + + template<class T> + struct ScalarTraits<Dune::DiagonalMatrix<T,1> > + { + enum { isScalar=true }; + }; + + template<class T> + struct ScalarTraits<Dune::ScaledIdentityMatrix<T,1> > + { + enum { isScalar=true }; + }; + + + /** \brief Class to identify matrix types and extract information + * + * Specialize this class for all type that can be used like a matrix. 
+ */ + template<class T> + struct MatrixTraits + { + enum { isMatrix=false }; + enum { rows=-1}; + enum { cols=-1}; + }; + + template<class T, int n, int m> + struct MatrixTraits<Dune::FieldMatrix<T,n,m> > + { + enum { isMatrix=true }; + enum { rows=n}; + enum { cols=m}; + }; + + template<class T, int n> + struct MatrixTraits<Dune::DiagonalMatrix<T,n> > + { + enum { isMatrix=true }; + enum { rows=n}; + enum { cols=n}; + }; + + template<class T, int n> + struct MatrixTraits<Dune::ScaledIdentityMatrix<T,n> > + { + enum { isMatrix=true }; + enum { rows=n}; + enum { cols=n}; + }; + + + /** \brief Internal helper class for product operations + * + */ + template<class A, class B, class C, bool AisScalar, bool BisScalar, bool CisScalar> + struct ProductHelper + { + static void addProduct(A& a, const B& b, const C& c) + { + b.umv(c, a); + } + }; + + template<class T, int n, int m, int k> + struct ProductHelper<Dune::FieldMatrix<T,n,k>, Dune::FieldMatrix<T,n,m>, Dune::FieldMatrix<T,m,k>, false, false, false> + { + typedef Dune::FieldMatrix<T,n,k> A; + typedef Dune::FieldMatrix<T,n,m> B; + typedef Dune::FieldMatrix<T,m,k> C; + static void addProduct(A& a, const B& b, const C& c) + { + for (size_t row = 0; row < n; ++row) + { + for (size_t col = 0 ; col < k; ++col) + { + for (size_t i = 0; i < m; ++i) + a[row][col] += b[row][i]*c[i][col]; + } + } + } + }; + + template<class T, int n> + struct ProductHelper<Dune::DiagonalMatrix<T,n>, Dune::DiagonalMatrix<T,n>, Dune::DiagonalMatrix<T,n>, false, false, false> + { + typedef Dune::DiagonalMatrix<T,n> A; + typedef A B; + typedef B C; + static void addProduct(A& a, const B& b, const C& c) + { + for (size_t i=0; i<n; ++i) + a.diagonal(i) += b.diagonal(i)*c.diagonal(i); + } + }; + + template<class T, int n> + struct ProductHelper<Dune::FieldMatrix<T,n,n>, Dune::DiagonalMatrix<T,n>, Dune::DiagonalMatrix<T,n>, false, false, false> + { + typedef Dune::FieldMatrix<T,n,n> A; + typedef Dune::DiagonalMatrix<T,n> B; + typedef B C; + static void addProduct(A& a, const B& b, const C& c) + { + for (size_t i=0; i<n; ++i) + a[i][i] += b.diagonal(i)*c.diagonal(i); + } + }; + + /** \brief Specialization for b being a scalar type and (a not a matrix type) + */ + template<class A, class B, class C, bool AisScalar, bool CisScalar> + struct ProductHelper<A, B, C, AisScalar, true, CisScalar> + { + static void addProduct(A& a, const B& b, const C& c) + { + a.axpy(b, c); + } + }; + + template<class A, class B, class C> + struct ProductHelper<A, B, C, true, true, true> + { + static void addProduct(A& a, const B& b, const C& c) + { + a += b*c; + } + }; + + + + /** \brief Add a product to some matrix or vector + * + * This function computes a+=b*c. + * + * This functions should tolerate all meaningful + * combinations of scalars, vectors, and matrices. + * + * a,b,c could be matrices with appropriate + * dimensions. But b can also always be a scalar + * represented by a 1-dim vector or a 1 by 1 matrix. 
+ */ + template<class A, class B, class C> + void addProduct(A& a, const B& b, const C& c) + { + ProductHelper<A,B,C,ScalarTraits<A>::isScalar, ScalarTraits<B>::isScalar, ScalarTraits<C>::isScalar>::addProduct(a,b,c); + } + + +}; + +#endif diff --git a/dune/solvers/common/genericvectortools.hh b/dune/solvers/common/genericvectortools.hh index a77b2b62f352d585669dc028cb179e458ce520e8..30a9a954fb31fb3b33c559c4ba8ef9d9556a21a4 100644 --- a/dune/solvers/common/genericvectortools.hh +++ b/dune/solvers/common/genericvectortools.hh @@ -8,9 +8,12 @@ #include<iostream> #include<bitset> -#include "dune/common/fvector.hh" +#include <dune/common/fvector.hh> + #include <dune/istl/bcrsmatrix.hh> +#include <dune/solvers/operators/sumoperator.hh> + /** \brief Various tools for working with istl vectors of arbitrary nesting depth */ struct GenericVector @@ -80,6 +83,16 @@ struct GenericVector GenericVector::resize(a[it.index()], *(it->begin())); } + template <class VectorTypeA, class SparseMatrixType, class LowRankMatrixType> + static void resize(VectorTypeA& a, const SumOperator<SparseMatrixType, LowRankMatrixType>& b) + { + a.resize(b.N()); + typename SparseMatrixType::const_iterator it = b.sparseMatrix().begin(); + typename SparseMatrixType::const_iterator end = b.sparseMatrix().end(); + for(; it!=end; ++it) + GenericVector::resize(a[it.index()], *(it->begin())); + } + template <class field_type, int n, class VectorTypeB> static void resize(Dune::FieldVector<field_type,n>& a, const VectorTypeB& b) {} diff --git a/dune/solvers/iterationsteps/multigridstep.hh b/dune/solvers/iterationsteps/multigridstep.hh index e871019fca6e95b3e6e1eec3d95416f5fa1c63b9..49484036bff62509a8cd674e095a587faf4f025e 100644 --- a/dune/solvers/iterationsteps/multigridstep.hh +++ b/dune/solvers/iterationsteps/multigridstep.hh @@ -149,7 +149,7 @@ void setTransferOperators(const DerivedTransferHierarchy& transfer) { mgTransfer_.resize(transfer.size()); - for(int j=0; j<transfer.size(); ++j) + for(size_t j=0; j<transfer.size(); ++j) mgTransfer_[j] = transfer[j]; } diff --git a/dune/solvers/iterationsteps/truncatedblockgsstep.hh b/dune/solvers/iterationsteps/truncatedblockgsstep.hh index 50bf06fb81f9602404c15276a78450aa19f41b56..4d1c0c522732cf6f72c0d8ffbbb00b858a4de394 100644 --- a/dune/solvers/iterationsteps/truncatedblockgsstep.hh +++ b/dune/solvers/iterationsteps/truncatedblockgsstep.hh @@ -4,7 +4,9 @@ #include <dune/common/bitsetvector.hh> #include <dune/solvers/iterationsteps/lineariterationstep.hh> - +#include <dune/solvers/operators/sumoperator.hh> +#include <dune/solvers/common/staticmatrixtools.hh> +#include <dune/solvers/common/arithmetic.hh> namespace TruncatedBlockGSStepNS { @@ -60,7 +62,7 @@ struct RecursiveGSStep typedef typename MType::block_type MBlock; typedef typename VType::block_type VBlock; - for(int row=0; row<mat.N(); ++row) + for (size_t row=0; row<mat.N(); ++row) { VBlock r = rhs[row]; const MBlock* Aii=0; @@ -69,7 +71,7 @@ struct RecursiveGSStep ColumnIterator end = mat[row].end(); for(; it!=end; ++it) { - int col = it.index(); + size_t col = it.index(); if (col == row) Aii = &(*it); else @@ -93,7 +95,7 @@ struct RecursiveGSStep<1> typedef typename MType::block_type MBlock; typedef typename VType::block_type VBlock; - for(int i=0; i<mat.N(); ++i) + for (size_t i=0; i<mat.N(); ++i) { const MBlock& Aii = mat[i][i]; if (Aii>0) @@ -115,4 +117,117 @@ struct RecursiveGSStep<1> } // end namespace TruncatedBlockGSStepNS +template<class SparseMatrixType, class LowRankMatrixType, class VectorType, class BitVectorType> 
+class TruncatedBlockGSStep<SumOperator<SparseMatrixType, LowRankMatrixType>, VectorType, BitVectorType> + : public LinearIterationStep<SumOperator<SparseMatrixType, LowRankMatrixType>, VectorType, BitVectorType> +{ + typedef SumOperator<SparseMatrixType, LowRankMatrixType> MatrixType; + typedef typename VectorType::block_type VectorBlock; + typedef typename VectorType::field_type field_type; + +public: + + //! Default constructor. Doesn't init anything + TruncatedBlockGSStep() {} + + //! Constructor with a linear problem + TruncatedBlockGSStep(MatrixType& mat, VectorType& x, VectorType& rhs) + : LinearIterationStep<MatrixType,VectorType>(mat, x, rhs) + {} + + virtual VectorType getSol() + { + return *(this->x_); + } + + //! Perform one iteration + virtual void iterate() + { + typedef typename SparseMatrixType::row_type::ConstIterator SparseMatrixColumnIterator; + typedef typename LowRankMatrixType::LowRankFactorType::row_type::ConstIterator LowRankFactorColumnIterator; + typedef typename SparseMatrixType::block_type SpBlock; + typedef typename LowRankMatrixType::block_type LRBlock; + typedef typename StaticMatrix::Promote<SpBlock, LRBlock>::Type MBlock; + typedef typename VectorType::block_type VBlock; + + const MatrixType& mat = *this->mat_; + const SparseMatrixType& sparse_mat = mat.sparseMatrix(); + const LowRankMatrixType& lowrank_mat = mat.lowRankMatrix(); + const VectorType& rhs = *this->rhs_; + VectorType& x = *this->x_; + const BitVectorType& ignore = *this->ignoreNodes_; + + // compute m*x once and only add/subtract single summands in the iterations later + VectorType mx(lowrank_mat.lowRankFactor().N()); + lowrank_mat.lowRankFactor().mv(x,mx); + // remember entries from old iterate + VBlock xi; + + for(typename SparseMatrixType::size_type row=0; row<sparse_mat.N(); ++row) + { + xi = x[row]; + VBlock r = rhs[row]; + MBlock Aii(0); + + SparseMatrixColumnIterator it = sparse_mat[row].begin(); + SparseMatrixColumnIterator end = sparse_mat[row].end(); + for(; it!=end; ++it) + { + size_t col = it.index(); + if (col == row) + { + StaticMatrix::axpy(Aii, 1.0, *it); + } + else + it->mmv(x[col],r); + } + + // lowrank specific stuff + // add diagonal entry to Aii + for (typename LowRankMatrixType::size_type k=0; k<lowrank_mat.lowRankFactor().N(); ++k) + { + const LRBlock& mki(lowrank_mat.lowRankFactor()[k][row]); + // Aii += m^tm[i][i] + // this should actually be \sum(lr_block^t*lr_block) if the blocks are nonsymmetric + Arithmetic::addProduct(Aii,mki,mki); + + // r -= m^tm*x, where x[row] were zero + mki.mmv(mx[k], r); + VBlock temp; + mki.mv(x[row], temp); + mki.umv(temp,r); + } + + // "solve" Local system by one GS step +// if (Aii!=0) + { + for(typename MBlock::size_type i=0; i<Aii.N(); ++i) + { + const typename MBlock::field_type& aii = Aii[i][i]; + if (aii>0) + { + if (not(ignore[row][i])) + { + x[row][i] = r[i]; + typename MBlock::row_type::Iterator inner_it = Aii[i].begin(); + typename MBlock::row_type::Iterator inner_end = Aii[i].end(); + for(; inner_it!=inner_end; ++inner_it) + if (inner_it.index()!=i) + x[row][i] -= (*inner_it) * x[row][inner_it.index()]; + x[row][i] /= aii; + } + } + } + } + + // update the mx + for (typename LowRankMatrixType::size_type k=0; k<lowrank_mat.lowRankFactor().N(); ++k) + { + const LRBlock& mki(lowrank_mat.lowRankFactor()[k][row]); + mki.umv(x[row]-xi, mx[k]); + } + } + } +}; + #endif diff --git a/dune/solvers/norms/diagnorm.hh b/dune/solvers/norms/diagnorm.hh index e3b59e2edfbf51ddf561757650831eefd45c8dd0..4a047a889fa26dd9ec2043d10043d02b3b4c7e97 
100644 --- a/dune/solvers/norms/diagnorm.hh +++ b/dune/solvers/norms/diagnorm.hh @@ -8,24 +8,34 @@ #include "norm.hh" -typedef Dune::BlockVector<Dune::FieldVector <double,1> > VectorType; - -class DiagNorm: public Norm<VectorType> +/** \brief Vector norm induced by (block) diagonal linear operator + * + * \tparam Diagonal some container type having the random access operator[]; used to store the diagonal entries of the matrix + * \tparam VectorType the vector type the norm may be applied to + */ +template <class Diagonal=Dune::BlockVector<Dune::FieldVector <double,1> >, class VectorType=Dune::BlockVector<Dune::FieldVector <double,1> > > +class DiagNorm: + public Norm<VectorType> { typedef typename VectorType::size_type SizeType; public: - DiagNorm(double alpha, const VectorType &d) : + DiagNorm(const double alpha, const Diagonal &d) : d(d), alpha(alpha) {} //! Compute the norm of the given vector - double operator()(const VectorType &v) const { + double operator()(const VectorType &v) const + { double r = 0.0; + typename VectorType::value_type Dv(0.0); for(SizeType row = 0; row < v.size(); ++row) - r += d[row] * v[row] * v[row]; + { + d[row].mv(v[row],Dv); + r += Dv*v[row]; + } return sqrt(fabs(alpha * r)); } @@ -35,7 +45,53 @@ class DiagNorm: public Norm<VectorType> { double r = 0.0; + typename VectorType::value_type Dv(0.0); for(SizeType row = 0; row < v1.size(); ++row) + { + d[row].mv(v1[row]-v2[row],Dv); + r += Dv*(v1[row]-v2[row]); + } + + return sqrt(fabs(alpha * r)); + } + + private: + const Diagonal& d; + + const double alpha; + +}; + +template<> +class DiagNorm<Dune::BlockVector<Dune::FieldVector <double,1> >, Dune::BlockVector<Dune::FieldVector <double,1> > >: + public Norm<Dune::BlockVector<Dune::FieldVector <double,1> > > +{ + typedef Dune::BlockVector<Dune::FieldVector <double,1> > VectorType; + typedef VectorType::size_type SizeType; + + public: + DiagNorm(const double alpha, const VectorType &d) : + d(d), + alpha(alpha) + {} + + //! Compute the norm of the given vector + double operator()(const VectorType &v) const + { + double r = 0.0; + + for(SizeType row = 0; row < v.size(); ++row) + r += d[row] * v[row] * v[row]; + + return sqrt(fabs(alpha * r)); + } + + //! Compute the norm of the difference of two vectors + double diff(const VectorType &v1, const VectorType &v2) const + { + double r = 0.0; + + for (SizeType row = 0; row < v1.size(); ++row) r += (double)d[row] * (v1[row]-v2[row]) * (v1[row] - v2[row]); return sqrt(fabs(alpha * r)); diff --git a/dune/solvers/norms/energynorm.hh b/dune/solvers/norms/energynorm.hh index 319e2219e3c2065200169a82d33639b11947ea55..86e68a17e0ea3676834466e99f934eeb3fb9dd42 100644 --- a/dune/solvers/norms/energynorm.hh +++ b/dune/solvers/norms/energynorm.hh @@ -15,7 +15,7 @@ * know the matrix in advance. This is, for example, the case * for the coarse level matrices constructed by a multilevel solver. * - * Be carefull: This matrix is not the one representing the preconditioner + * Be careful: This matrix is not the one representing the preconditioner * induced by the LinearIterationStep. * * \todo Elaborate documentation. 
@@ -28,14 +28,14 @@ /** \brief The type used for the result */ typedef typename DiscFuncType::field_type field_type; - EnergyNorm() : iterationStep_(NULL), matrix_(NULL) {} + EnergyNorm(const double tol=1e-10 ) : iterationStep_(NULL), matrix_(NULL), tol_(tol) {} - EnergyNorm(LinearIterationStep<OperatorType, DiscFuncType>& it) - : iterationStep_(&it), matrix_(NULL) + EnergyNorm(LinearIterationStep<OperatorType, DiscFuncType>& it, const double tol=1e-10) + : iterationStep_(&it), matrix_(NULL), tol_(tol) {} - EnergyNorm(const OperatorType& matrix) - : iterationStep_(NULL), matrix_(&matrix) + EnergyNorm(const OperatorType& matrix, const double tol=1e-10) + : iterationStep_(NULL), matrix_(&matrix), tol_(tol) {} void setMatrix(const OperatorType* matrix) { @@ -62,7 +62,8 @@ return sqrt(this->EnergyNorm<OperatorType,DiscFuncType>::normSquared(f)); } - //! Compute the square of the norm of the given vector + /** \brief Compute the square of the norm of the given vector + \todo This could be implemented without the temporary. */ virtual double normSquared(const DiscFuncType& f) const { if (iterationStep_ == NULL && matrix_ == NULL) @@ -76,18 +77,47 @@ tmp = 0; A.umv(f, tmp); - return f*tmp; + double ret = f*tmp; + + if (ret < 0) + { + if (ret < -tol_) + { + char msg[1024]; + sprintf(msg, "Supplied linear operator is not positive (semi-)definite: (u,Au) = %e", ret); + DUNE_THROW(Dune::RangeError, msg); + } + ret = 0.0; + } + + return ret; } /** \brief Compute the squared norm for a given vector and matrix \todo This could be implemented without the temporary. */ static field_type normSquared(const DiscFuncType& u, - const OperatorType& A) + const OperatorType& A, + const double tol=1e-10) { DiscFuncType tmp(u.size()); tmp = 0; A.umv(u, tmp); - return u*tmp; + + double ret = u*tmp; + + if (ret < 0) + { + if (ret < -tol) + { + char msg[1024]; + sprintf(msg, "Supplied linear operator is not positive (semi-)definite: (u,Au) = %e", ret); + DUNE_THROW(Dune::RangeError, msg); + } + ret = 0.0; + } + + return ret; + } DUNE_DEPRECATED; protected: @@ -96,6 +126,8 @@ const OperatorType* matrix_; + const double tol_; + }; #endif diff --git a/dune/solvers/norms/fullnorm.hh b/dune/solvers/norms/fullnorm.hh index 3e849828a8eab493e622b27c132e849521828a30..0191cb0241ebbec9fd09beed7fbbbe6338e8619a 100644 --- a/dune/solvers/norms/fullnorm.hh +++ b/dune/solvers/norms/fullnorm.hh @@ -8,45 +8,93 @@ #include "norm.hh" -typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector; +/** \brief Vector norm induced by dense low rank linear operator M + * + * \f$M = m^t*m\f$ + * \f$\Vert u \Vert_M = (u, Mu)^{1/2} = (mu,mu)^{1/2}\f$ + * + * \tparam LowRankFactor the type of the factor used to represent the low rank operator + * \tparam VectorType the vector type the norm may be applied to + */ +template <class LowRankFactor=Dune::BlockVector<Dune::FieldVector <double,1> >, class VectorType=Dune::BlockVector<Dune::FieldVector <double,1> > > +class FullNorm: public Norm<VectorType> +{ + public: + FullNorm(const double alpha, const LowRankFactor &lowRankFactor) : + lowRankFactor_(lowRankFactor), + alpha(alpha) + {} + + //! Compute the norm of the given vector + double operator()(const VectorType &v) const + { + VectorType r(lowRankFactor_.N()); + r = 0.0; + + lowRankFactor_.umv(v,r); + + return sqrt(fabs(alpha*(r*r))); + } + + //! 
Compute the norm of the difference of two vectors + double diff(const VectorType &v1, const VectorType &v2) const + { + VectorType r(lowRankFactor_.N()); + r = 0.0; -class FullNorm: public Norm<Vector> + for (typename LowRankFactor::size_type k=0; k<lowRankFactor_.N(); ++k) + for (typename LowRankFactor::size_type i=0; i<lowRankFactor_.M(); ++i) + lowRankFactor_[k][i].umv(v1[i]-v2[i],r[k]); + + return sqrt(fabs(alpha*(r*r))); + } + + private: + const LowRankFactor& lowRankFactor_; + + const double alpha; + +}; + +template<> +class FullNorm<Dune::BlockVector<Dune::FieldVector<double,1> >, Dune::BlockVector<Dune::FieldVector<double,1> > >: + public Norm<Dune::BlockVector<Dune::FieldVector<double,1> > > { - typedef typename VectorType::size_type SizeType; + typedef Dune::BlockVector<Dune::FieldVector<double,1> > VectorType; + typedef VectorType::size_type SizeType; public: - FullNorm(double alpha, const Vector &m) : + FullNorm(const double alpha, const VectorType &m) : m(m), alpha(alpha) {} //! Compute the norm of the given vector - double operator()(const Vector &v) const + double operator()(const VectorType &v) const { double r = 0.0; - for(SizeType row = 0; row < v.size(); ++row) + for (SizeType row = 0; row < v.size(); ++row) r += m[row] * v[row]; return sqrt(fabs(alpha*r*r)); - } + } //! Compute the norm of the difference of two vectors - double diff(const Vector &v1, const Vector &v2) const + double diff(const VectorType &v1, const VectorType &v2) const { double r = 0.0; - for(SizeType row = 0; row < v1.size(); ++row) + for (SizeType row = 0; row < v1.size(); ++row) r += (double)m[row] * (v1[row] - v2[row]); return sqrt(fabs(alpha*r*r)); } private: - const Vector &m; + const VectorType &m; const double alpha; }; - -#endif +#endif diff --git a/dune/solvers/operators/lowrankoperator.hh b/dune/solvers/operators/lowrankoperator.hh index b47da905b7142d6df5dd3bbfb6e30347e4ab08e3..0466a0a4b8cdd95935a4e57c127cb0dbd1b90adf 100644 --- a/dune/solvers/operators/lowrankoperator.hh +++ b/dune/solvers/operators/lowrankoperator.hh @@ -21,45 +21,50 @@ class LowRankOperator //! export size type typedef typename LRFactorType::size_type size_type; + //! default constructor + LowRankOperator(): + factor_allocated_internally_(true) + { + lowRankFactor_ = new LowRankFactorType; + } + //! constructor taking the defining factor as argument LowRankOperator(LowRankFactorType& lrFactor): - lowRankFactor_(lrFactor) + lowRankFactor_(&lrFactor), + factor_allocated_internally_(false) {} + ~LowRankOperator() + { + if (factor_allocated_internally_) + delete lowRankFactor_; + } + //! b += Ax template <class LVectorType, class RVectorType> void umv(const LVectorType& x, RVectorType& b) const { - LVectorType temp(lowRankFactor_.N()); - lowRankFactor_.mv(x,temp); - - typename LowRankFactorType::RowIterator row_end = lowRankFactor_.end(); - typename LowRankFactorType::RowIterator row_it = lowRankFactor_.begin(); - for (; row_it!=row_end; ++row_it) - { - typename LowRankFactorType::ColIterator col_end = row_it->end(); - typename LowRankFactorType::ColIterator col_it = row_it->begin(); - for (; col_it!=col_end; ++col_it) - lowRankFactor_[row_it.index()][col_it.index()].umv(temp[row_it.index()],b[col_it.index()]); - } + LVectorType temp(lowRankFactor_->N()); + lowRankFactor_->mv(x,temp); + lowRankFactor_->umtv(temp,b); } //! 
b += alpha*Ax template <class LVectorType, class RVectorType> void usmv(const field_type& alpha, const LVectorType& x, RVectorType& b) const { - LVectorType temp(lowRankFactor_.N()); - lowRankFactor_.mv(x,temp); - - typename LowRankFactorType::RowIterator row_end = lowRankFactor_.end(); - typename LowRankFactorType::RowIterator row_it = lowRankFactor_.begin(); - for (; row_it!=row_end; ++row_it) - { - typename LowRankFactorType::ColIterator col_end = row_it->end(); - typename LowRankFactorType::ColIterator col_it = row_it->begin(); - for (; col_it!=col_end; ++col_it) - lowRankFactor_[row_it.index()][col_it.index()].usmv(alpha,temp[row_it.index()],b[col_it.index()]); - } + LVectorType temp(lowRankFactor_->N()); + lowRankFactor_->mv(x,temp); + lowRankFactor_->usmtv(alpha,temp,b); + } + + //! b -= Ax + template <class LVectorType, class RVectorType> + void mmv(const LVectorType& x, RVectorType& b) const + { + LVectorType temp(lowRankFactor_->N()); + lowRankFactor_->mv(x,temp); + lowRankFactor_->usmtv(-1.0,temp,b); } //! b = Ax @@ -71,26 +76,33 @@ class LowRankOperator } //! returns number of rows - size_type N() + const size_type N() const { - return lowRankFactor_.M(); + return lowRankFactor_->M(); } //! returns number of columns - size_type M() + const size_type M() const { - return lowRankFactor_.M(); + return lowRankFactor_->M(); } //! returns the defining factor - const LowRankFactorType& getLowRankFactor() const + const LowRankFactorType& lowRankFactor() const { - return lowRankFactor_; + return *lowRankFactor_; } + //! returns the defining factor + LowRankFactorType& lowRankFactor() + { + return *lowRankFactor_; + } private: //! the defining factor - LowRankFactorType& lowRankFactor_; + LowRankFactorType* lowRankFactor_; + + const bool factor_allocated_internally_; }; diff --git a/dune/solvers/operators/nulloperator.hh b/dune/solvers/operators/nulloperator.hh index f9b2888b151638efed9083619ff67f6c5b989a51..775c95eeeb6d15d32f6eb880fced21811ea5c0bb 100644 --- a/dune/solvers/operators/nulloperator.hh +++ b/dune/solvers/operators/nulloperator.hh @@ -24,6 +24,10 @@ class NullOperator { return zero_; } + BlockType& operator[](size_t i) + { + return zero_; + } }; //! a dummy row @@ -32,6 +36,10 @@ class NullOperator public: //! export the block type typedef BlockType block_type; + //! export the field type + typedef typename block_type::field_type field_type; + //! export the size type + typedef size_t size_type; //! Default constructor NullOperator(){} @@ -51,6 +59,14 @@ class NullOperator void umv(const LVectorType& x, RVectorType& b) const {} + /** \brief transposed Matrix-Vector multiplication + * + * Implements b += N^tx and hence does nothing (N=0 !) + */ + template <class LVectorType, class RVectorType> + void umtv(const LVectorType& x, RVectorType& b) const + {} + /** \brief Matrix-Vector multiplication with scalar multiplication * * Implements b += a*Nx and hence does nothing (N=0 !) @@ -59,6 +75,14 @@ class NullOperator void usmv(const double a, const LVectorType& x, RVectorType& b) const {} + /** \brief transposed Matrix-Vector multiplication with scalar multiplication + * + * Implements b += a*N^tx and hence does nothing (N=0 !) + */ + template <class LVectorType, class RVectorType> + void usmtv(const double a, const LVectorType& x, RVectorType& b) const + {} + /** \brief Matrix-Vector multiplication * * Implements b = Nx and hence does nothing but set b=0 (N=0 !) @@ -75,12 +99,33 @@ class NullOperator return rowDummy_; } + //! 
random access operator + RowDummy& operator[](size_t i) + { + return rowDummy_; + } + //! return j-th diagonal entry const block_type& diagonal(size_t i) const { return rowDummy_[0]; } + //! multiplication operator with scalar + void operator*=(const field_type&) + {} + + size_type M() const + { + return 0; + } + + size_type N() const + { + return 0; + } + + }; #endif diff --git a/dune/solvers/operators/sumoperator.hh b/dune/solvers/operators/sumoperator.hh index 7869d376537c6de50792e93e5c9fe567b57eed36..4e909c7088ab9224c46727cdacdb7eba3f971fd8 100644 --- a/dune/solvers/operators/sumoperator.hh +++ b/dune/solvers/operators/sumoperator.hh @@ -16,46 +16,97 @@ class SumOperator //! export the summand type typedef LowRankMatrix LowRankMatrixType; + //! default constructor - allocates memory for summand operators internally + SumOperator(): + summands_allocated_internally_(true) + { + sparse_matrix_ = new SparseMatrixType; + lowrank_matrix_ = new LowRankMatrixType; + } + //! construct from summands SumOperator(SparseMatrixType& A, LowRankMatrixType& M): - sparse_matrix_(A), - lowrank_matrix_(M) + sparse_matrix_(&A), + lowrank_matrix_(&M), + summands_allocated_internally_(false) {} + ~SumOperator() + { + if (summands_allocated_internally_) + { + delete sparse_matrix_; + delete lowrank_matrix_; + } + } + //! b += (A+M)x template <class LVectorType, class RVectorType> void umv(const LVectorType& x, RVectorType& b) const { - sparse_matrix_.umv(x,b); - lowrank_matrix_.umv(x,b); + sparse_matrix_->umv(x,b); + lowrank_matrix_->umv(x,b); + } + + //! b -= (A+M)x + template <class LVectorType, class RVectorType> + void mmv(const LVectorType& x, RVectorType& b) const + { + sparse_matrix_->mmv(x,b); + lowrank_matrix_->mmv(x,b); } //! b = (A+M)x template <class LVectorType, class RVectorType> void mv(const LVectorType& x, RVectorType& b) { - sparse_matrix_.mv(x,b); - lowrank_matrix_.umv(x,b); + sparse_matrix_->mv(x,b); + lowrank_matrix_->umv(x,b); + } + + //! return the number of rows + const size_t N() const + { + return sparse_matrix_->N(); + } + + //! return the number of columns + const size_t M() const + { + return sparse_matrix_->M(); + } + + //! return one summand + const SparseMatrixType& sparseMatrix() const + { + return *sparse_matrix_; } //! return one summand - SparseMatrixType& getSparseMatrix() + SparseMatrixType& sparseMatrix() + { + return *sparse_matrix_; + } + + //! return "the other" summand + const LowRankMatrixType& lowRankMatrix() const { - return sparse_matrix_; + return *lowrank_matrix_; } //! return "the other" summand - LowRankMatrixType& getLowRankMatrix() + LowRankMatrixType& lowRankMatrix() { - return lowrank_matrix_; + return *lowrank_matrix_; } private: //! one summand - SparseMatrixType& sparse_matrix_; + SparseMatrixType* sparse_matrix_; //! the other summand - LowRankMatrixType& lowrank_matrix_; - + LowRankMatrixType* lowrank_matrix_; + //! 
are the summands supplied or stored internally + const bool summands_allocated_internally_; }; #endif diff --git a/dune/solvers/solvers/loopsolver.cc b/dune/solvers/solvers/loopsolver.cc index 5dee67692f51919d127260289b42730182cdbacf..35f3b22e9f77c8acbd98ccbdbed9cac6c14127f1 100644 --- a/dune/solvers/solvers/loopsolver.cc +++ b/dune/solvers/solvers/loopsolver.cc @@ -87,7 +87,7 @@ void ::LoopSolver<VectorType, BitVectorType>::solve() oldSolution -= iterationStep_->getSol(); double normOfCorrection; - double normOfError; + double normOfError=std::numeric_limits<double>::quiet_NaN(); double convRate; normOfCorrection = this->errorNorm_->operator()(oldSolution); diff --git a/dune/solvers/test/lowrankoperatortest.cc b/dune/solvers/test/lowrankoperatortest.cc index 0b3f3c4de8fbe64cb120dcb3a9c0270945498621..d758e2eb714ee54c47c149c2141f83660fbd36a9 100644 --- a/dune/solvers/test/lowrankoperatortest.cc +++ b/dune/solvers/test/lowrankoperatortest.cc @@ -134,6 +134,18 @@ bool check() break; } + lr_op.mmv(x,b); + for (size_t i=0; i<b.size(); ++i) + if (b[i].two_norm()>1e-12) + { + std::cout << "LowRankOperator::mmv does not yield correct result. Difference = " << (b[i] - ref_vec[i]).two_norm() << std::endl; + passed = false; + for (size_t j=0; j<b.size(); ++j) + std::cout << b[j] << std::endl; + std::cout << std::endl; + break; + } + b=1.0; lr_op.mv(x,b); for (size_t i=0; i<b.size(); ++i) @@ -158,7 +170,7 @@ bool check() break; } - LowRankFactorType lr_factor_reborn = lr_op.getLowRankFactor(); + LowRankFactorType lr_factor_reborn = lr_op.lowRankFactor(); if (passed) std::cout << "passed"; @@ -168,12 +180,18 @@ bool check() int main(int argc, char** argv) try { - static const int block_size = BLOCKSIZE; +// static const int block_size = BLOCKSIZE; bool passed(true); - passed = check<Dune::Matrix<Dune::ScaledIdentityMatrix<double, block_size> > >(); - passed = passed and check<Dune::Matrix<Dune::DiagonalMatrix<double, block_size> > >(); + passed = check<Dune::Matrix<Dune::ScaledIdentityMatrix<double, 1> > >(); + passed = passed and check<Dune::Matrix<Dune::DiagonalMatrix<double, 1> > >(); + passed = check<Dune::Matrix<Dune::ScaledIdentityMatrix<double, 2> > >(); + passed = passed and check<Dune::Matrix<Dune::DiagonalMatrix<double, 2> > >(); + passed = check<Dune::Matrix<Dune::ScaledIdentityMatrix<double, 3> > >(); + passed = passed and check<Dune::Matrix<Dune::DiagonalMatrix<double, 3> > >(); + passed = check<Dune::Matrix<Dune::ScaledIdentityMatrix<double, 4> > >(); + passed = passed and check<Dune::Matrix<Dune::DiagonalMatrix<double, 4> > >(); return passed ? 
0 : 1; } diff --git a/dune/solvers/test/sumoperatortest.cc b/dune/solvers/test/sumoperatortest.cc index 4ec4c848506929765c3aa74088f0cfe403395269..cf7130ec457d9b62feb50d6f03ea4889e8b980b1 100644 --- a/dune/solvers/test/sumoperatortest.cc +++ b/dune/solvers/test/sumoperatortest.cc @@ -9,6 +9,7 @@ #include <dune/istl/bvector.hh> #include <dune/istl/matrix.hh> +#include <dune/istl/matrixindexset.hh> #include <dune/istl/diagonalmatrix.hh> #include <dune/istl/scaledidmatrix.hh> #include <dune/istl/io.hh> @@ -24,24 +25,28 @@ bool check() { bool passed = true; + typedef LowRankOperator<LowRankFactorType> LowRankMatrixType; typedef typename SparseMatrixType::block_type sp_block_type; typedef typename LowRankFactorType::block_type lr_block_type; size_t block_size = sp_block_type::rows; assert(block_size==lr_block_type::rows); - size_t m=1, n=2; + size_t m=3, n=10; typedef Dune::FieldVector<typename sp_block_type::field_type, sp_block_type::rows> Vblock_type; typedef Dune::BlockVector<Vblock_type> Vector; - Vector x(n), b(n), ref_vec(n); + Vector x(n), b(n), c(n), ref_vec(n); for (size_t i = 0; i<n; ++i) // x[i] = 1.0; x[i] = (1.0*rand()) / RAND_MAX; - b = ref_vec = 0.0; + b = c = ref_vec = 0.0; + + SumOperator<SparseMatrixType,LowRankMatrixType> sum_op_default; LowRankFactorType lr_factor(m,n); - SparseMatrixType sparse_matrix(n,n,4,SparseMatrixType::row_wise); + SparseMatrixType sparse_matrix(n,n,3*n,SparseMatrixType::row_wise); + // create sparse Matrix as tridiagonal matrix typedef typename SparseMatrixType::CreateIterator Iter; @@ -57,15 +62,25 @@ bool check() } // Now the sparsity pattern is fully set up and we can add values + double entry=0.0; for (size_t i=0; i<n; ++i) { if (i>0) - sparse_matrix[i][i-1] = (1.0*rand())/RAND_MAX; - sparse_matrix[i][i]= (1.0*rand())/RAND_MAX; + { + entry = (1.0*rand())/RAND_MAX; + sparse_matrix[i][i-1] = entry; + } + entry = (1.0*rand())/RAND_MAX; + sparse_matrix[i][i]= entry; if (i<n-1) - sparse_matrix[i][i+1] = (1.0*rand())/RAND_MAX; + { + entry = (1.0*rand())/RAND_MAX; + sparse_matrix[i][i+1] = entry; + } } + sum_op_default.sparseMatrix() = sparse_matrix; + // set lowrankfactor to random values typedef typename lr_block_type::RowIterator BlockRowIterator; typedef typename lr_block_type::ColIterator BlockColIterator; @@ -82,7 +97,9 @@ bool check() } } - LowRankOperator<LowRankFactorType> lr_op(lr_factor); + sum_op_default.lowRankMatrix().lowRankFactor() = lr_factor; + + LowRankMatrixType lr_op(lr_factor); std::cout << "Checking SumOperator<" << Dune::className(sparse_matrix) << ", " << Dune::className(lr_op) << " > ... "; @@ -93,8 +110,9 @@ bool check() sparse_matrix.umv(x,ref_vec); sum_op.umv(x,b); + sum_op_default.umv(x,c); for (size_t i=0; i<b.size(); ++i) - if ((b[i] - ref_vec[i]).two_norm()>1e-15) + if ((b[i] - ref_vec[i]).two_norm()>1e-12 or (c[i] - ref_vec[i]).two_norm()>1e-12) { std::cout << "SumOperator::umv does not yield correct result." << std::endl; passed = false; @@ -104,19 +122,44 @@ bool check() break; } - b=1.0; + c=b=1.0; sum_op.mv(x,b); + sum_op_default.mv(x,c); for (size_t i=0; i<b.size(); ++i) - if ((b[i] - ref_vec[i]).two_norm()>1e-15) + if ((b[i] - ref_vec[i]).two_norm()>1e-12 or (c[i] - ref_vec[i]).two_norm()>1e-12) { std::cout << "SumOperator::mv does not yield correct result." 
<< std::endl; passed = false; break; } - LowRankOperator<LowRankFactorType> lr_factor_reborn = sum_op.getLowRankMatrix(); - SparseMatrixType sparse_matrix_reborn = sum_op.getSparseMatrix(); + sum_op.mmv(x,b); + sum_op_default.mmv(x,c); + for (size_t i=0; i<b.size(); ++i) + if (b[i].two_norm()>1e-12 or c[i].two_norm()>1e-12) + { + std::cout << "SumOperator::mmv does not yield correct result." << std::endl; + passed = false; + break; + } + + + LowRankOperator<LowRankFactorType> lr_factor_reborn = sum_op.lowRankMatrix(); + SparseMatrixType sparse_matrix_reborn = sum_op.sparseMatrix(); + + + /* test for consistency of return value of N() */ + if (sum_op.N() != sparse_matrix.N()) + { + std::cout << "SumOperator::N does not return correct value for the SumOperator constructed from given matrices. (returns " << sum_op.N() << ", should return " << sum_op.sparseMatrix().N() << ")" << std::endl; + passed = false; + } + if (sum_op_default.N() !=sparse_matrix.N()) + { + std::cout << "SumOperator::N does not return correct value for the default-constructed SumOperator." << std::endl; + passed = false; + } if (passed) std::cout << "passed"; diff --git a/dune/solvers/transferoperators/compressedmultigridtransfer.hh b/dune/solvers/transferoperators/compressedmultigridtransfer.hh index 37b3ea74ba20374bb514ad094ead08f7972af2e0..cce655cb169c2ac384be4e85fc9bbdcbf8a0c770 100644 --- a/dune/solvers/transferoperators/compressedmultigridtransfer.hh +++ b/dune/solvers/transferoperators/compressedmultigridtransfer.hh @@ -5,6 +5,9 @@ #include <dune/common/fmatrix.hh> #include <dune/common/bitsetvector.hh> +#include <dune/solvers/operators/sumoperator.hh> +#include <dune/solvers/common/staticmatrixtools.hh> + #include "genericmultigridtransfer.hh" #include "multigridtransfer.hh" @@ -142,6 +145,157 @@ public: +protected: + + TransferOperatorType* matrix_; + bool matrixInternallyAllocated; + +}; + +template< + class VectorType, + class BitVectorType, + class SparseMatrixType, + class LowRankMatrixType > +class CompressedMultigridTransfer<VectorType, BitVectorType, SumOperator<SparseMatrixType, LowRankMatrixType> > : + public MultigridTransfer<VectorType, BitVectorType, SumOperator<SparseMatrixType,LowRankMatrixType> > +{ + + enum {blocksize = VectorType::block_type::dimension}; + + typedef typename VectorType::field_type field_type; + +public: + + typedef SumOperator<SparseMatrixType, LowRankMatrixType> OperatorType; + + typedef Dune::BCRSMatrix<Dune::FieldMatrix<field_type,1,1> > TransferOperatorType; + + CompressedMultigridTransfer() + { + matrix_ = new TransferOperatorType; + matrixInternallyAllocated = true; + } + + virtual ~CompressedMultigridTransfer() + { + if (matrixInternallyAllocated) + delete matrix_; + } + + + /** \brief Sets up the operator between two P1 spaces + * + * \param grid The grid + * \param cL The coarse grid level + * \param fL The fine grid level + */ + template <class GridType> + void setup(const GridType& grid, int cL, int fL) + { + GenericMultigridTransfer::setup<TransferOperatorType, GridType, field_type>(*matrix_, grid, cL, fL); + } + + + /** \brief Restrict a function from the fine onto the coarse grid + */ + virtual void restrict(const VectorType& f, VectorType &t) const + { + GenericMultigridTransfer::restrict(*matrix_, f, t, -1); + } + + + /** \brief Restrict a bitfield from the fine onto the coarse grid + */ + virtual void restrictScalarBitField(const Dune::BitSetVector<1>& f, Dune::BitSetVector<1>& t) const + { + GenericMultigridTransfer::restrictScalarBitField(*matrix_, f, t, -1); + 
} + + + /** \brief Restrict a vector valued bitfield from the fine onto the coarse grid + */ + virtual void restrict(const BitVectorType& f, BitVectorType& t) const + { + GenericMultigridTransfer::restrictBitField(*matrix_, f, t, -1); + } + + + /** \brief Restrict a vector valued bitfield from the fine onto the coarse grid + * Fine bits only influence their father bits + */ + virtual void restrictToFathers(const BitVectorType& f, BitVectorType& t) const + { + GenericMultigridTransfer::restrictBitFieldToFathers(*matrix_, f, t, -1); + } + + + /** \brief Prolong a function from the coarse onto the fine grid + */ + virtual void prolong(const VectorType& f, VectorType &t) const + { + GenericMultigridTransfer::prolong(*matrix_, f, t, -1); + } + + + /** \brief Galerkin assemble a coarse stiffness matrix + */ + virtual void galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat) const + { + GenericMultigridTransfer::galerkinRestrict(*matrix_, fineMat.sparseMatrix(), coarseMat.sparseMatrix(), -1); + + const typename LowRankMatrixType::LowRankFactorType& lr_fine_factor(fineMat.lowRankMatrix().lowRankFactor()); + typename LowRankMatrixType::LowRankFactorType& lr_coarse_factor(coarseMat.lowRankMatrix().lowRankFactor()); + + lr_coarse_factor = 0; + + for (size_t i=0; i<matrix_->N(); ++i) + { + typename TransferOperatorType::ColIterator j_it = (*matrix_)[i].begin(); + typename TransferOperatorType::ColIterator j_end = (*matrix_)[i].end(); + + for (; j_it!=j_end; ++j_it) + { + size_t j = j_it.index(); + + for (size_t k=0; k<lr_fine_factor.N(); ++k) + StaticMatrix::axpy(lr_coarse_factor[k][j], *j_it, lr_fine_factor[k][i]); + } + } + } + + + /** \brief Set Occupation of Galerkin restricted coarse stiffness matrix + * + * Set occupation of Galerkin restricted coarse matrix. Call this one before + * galerkinRestrict to ensure all non-zeroes are present + * \param fineMat The fine level matrix + * \param coarseMat The coarse level matrix + */ + virtual void galerkinRestrictSetOccupation(const OperatorType& fineMat, OperatorType& coarseMat) const + { + GenericMultigridTransfer::galerkinRestrictSetOccupation(*matrix_, fineMat.sparseMatrix(), coarseMat.sparseMatrix(), -1); + + coarseMat.lowRankMatrix().lowRankFactor().setSize(fineMat.lowRankMatrix().lowRankFactor().N(), matrix_->M()); + } + + + /** \brief Direct access to the operator matrix, if you absolutely want it! */ + const TransferOperatorType& getMatrix() const { + return *matrix_; + } + + /** \brief Set matrix! 
*/ + void setMatrix(TransferOperatorType& matrix) + { + if (matrixInternallyAllocated) + delete matrix_; + matrixInternallyAllocated = false; + matrix_ = &matrix; + } + + + protected: TransferOperatorType* matrix_; diff --git a/dune/solvers/transferoperators/genericmultigridtransfer.hh b/dune/solvers/transferoperators/genericmultigridtransfer.hh index 67a8b5612843d430d489ad0e07c8cd57f2d49b87..2388f0c169deedd29679c75ae1a32533956f314d 100644 --- a/dune/solvers/transferoperators/genericmultigridtransfer.hh +++ b/dune/solvers/transferoperators/genericmultigridtransfer.hh @@ -55,7 +55,7 @@ public: template<class MatrixType> static bool diagonalIsOne(const MatrixType& A, unsigned int j) { - if (j>=A.N()) + if (j>=int(A.N())) return A[0][0]>1-1e-5; return A[j][j]>1-1e-5; } @@ -105,7 +105,7 @@ public: // Get local finite element const FEType& coarseBaseSet = p1FECache.get(cIt->type()); - const int numCoarseBaseFct = coarseBaseSet.localBasis().size(); + const size_t numCoarseBaseFct = coarseBaseSet.localBasis().size(); // preallocate vector for function evaluations std::vector<Dune::FieldVector<field_type,1> > values(numCoarseBaseFct); @@ -126,10 +126,10 @@ public: // Get local finite element const FEType& fineBaseSet = p1FECache.get(fIt->type());; - const int numFineBaseFct = fineBaseSet.localBasis().size(); + const size_t numFineBaseFct = fineBaseSet.localBasis().size(); - for (int j=0; j<numFineBaseFct; j++) + for (size_t j=0; j<numFineBaseFct; j++) { const Dune::LocalKey& jLocalKey = fineBaseSet.localCoefficients().localKey(j); @@ -141,7 +141,7 @@ public: // Evaluate coarse grid base functions coarseBaseSet.localBasis().evaluateFunction(local, values); - for (int i=0; i<numCoarseBaseFct; i++) + for (size_t i=0; i<numCoarseBaseFct; i++) { if (values[i] > 0.001) { @@ -167,7 +167,7 @@ public: // Get local finite element const FEType& coarseBaseSet = p1FECache.get(cIt->type()); - const int numCoarseBaseFct = coarseBaseSet.localBasis().size(); + const size_t numCoarseBaseFct = coarseBaseSet.localBasis().size(); typedef typename GridType::template Codim<0>::Entity EntityType; typedef typename EntityType::HierarchicIterator HierarchicIterator; @@ -191,9 +191,9 @@ public: // Get local finite element const FEType& fineBaseSet = p1FECache.get(fIt->type()); - const int numFineBaseFct = fineBaseSet.localBasis().size(); + const size_t numFineBaseFct = fineBaseSet.localBasis().size(); - for (int j=0; j<numFineBaseFct; j++) + for (size_t j=0; j<numFineBaseFct; j++) { const Dune::LocalKey& jLocalKey = fineBaseSet.localCoefficients().localKey(j); @@ -205,7 +205,7 @@ public: // Evaluate coarse grid base functions coarseBaseSet.localBasis().evaluateFunction(local, values); - for (int i=0; i<numCoarseBaseFct; i++) + for (size_t i=0; i<numCoarseBaseFct; i++) { if (values[i] > 0.001) { diff --git a/dune/solvers/transferoperators/truncatedcompressedmgtransfer.cc b/dune/solvers/transferoperators/truncatedcompressedmgtransfer.cc index 3883973dac3c5dab9eb0fd53efd77ce83bd9d077..55d0755e3b54a53f82297aba71972f8863bfa5be 100644 --- a/dune/solvers/transferoperators/truncatedcompressedmgtransfer.cc +++ b/dune/solvers/transferoperators/truncatedcompressedmgtransfer.cc @@ -1,7 +1,5 @@ - - -template<class DiscFuncType, class BitVectorType, class OperatorType> -void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>::prolong(const DiscFuncType& f, DiscFuncType& t, +template<class VectorType, class BitVectorType, class MatrixType> +void TruncatedCompressedMGTransfer<VectorType, BitVectorType, 
MatrixType>::prolong(const VectorType& f, VectorType& t, const BitVectorType& critical) const { if (f.size() != this->matrix_->M()) @@ -14,12 +12,12 @@ void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>::p t.resize(this->matrix_->N()); - typedef typename DiscFuncType::Iterator Iterator; - typedef typename DiscFuncType::ConstIterator ConstIterator; + typedef typename VectorType::Iterator Iterator; + typedef typename VectorType::ConstIterator ConstIterator; typedef typename TransferOperatorType::row_type RowType; typedef typename RowType::ConstIterator ColumnIterator; - + Iterator tIt = t.begin(); for(size_t rowIdx=0; rowIdx<this->matrix_->N(); rowIdx++) { @@ -38,18 +36,14 @@ void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>::p if (!critical[rowIdx][i]) (*tIt)[i] += (*cIt)[0][0] * f[cIt.index()][i]; - } - } - ++tIt; } - } -template<class DiscFuncType, class BitVectorType, class OperatorType> -void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>::restrict(const DiscFuncType& f, DiscFuncType& t, +template<class VectorType, class BitVectorType, class MatrixType> +void TruncatedCompressedMGTransfer<VectorType, BitVectorType, MatrixType>::restrict(const VectorType& f, VectorType& t, const BitVectorType& critical) const { if (f.size() != this->matrix_->N()) @@ -64,44 +58,38 @@ void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>::r t.resize(this->matrix_->M()); t = 0; - typedef typename DiscFuncType::Iterator Iterator; - typedef typename DiscFuncType::ConstIterator ConstIterator; + typedef typename VectorType::Iterator Iterator; + typedef typename VectorType::ConstIterator ConstIterator; typedef typename TransferOperatorType::row_type RowType; typedef typename RowType::ConstIterator ColumnIterator; for (size_t rowIdx=0; rowIdx<this->matrix_->N(); rowIdx++) { - + const RowType& row = (*this->matrix_)[rowIdx]; ColumnIterator cIt = row.begin(); ColumnIterator cEndIt = row.end(); - + for(; cIt!=cEndIt; ++cIt) { - + // The following lines are a matrix-vector loop, but rows belonging // to critical dofs are left out - typename DiscFuncType::block_type& tEntry = t[cIt.index()]; + typename VectorType::block_type& tEntry = t[cIt.index()]; for (int i=0; i<blocksize; i++) { - + if (!critical[rowIdx][i]) tEntry[i] += (*cIt)[0][0] * f[rowIdx][i]; - } - } - } - } - -template<class DiscFuncType, class BitVectorType, class OperatorType> -void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>:: -galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat, +template<class VectorType, class BitVectorType, class MatrixType> +void TruncatedCompressedMGTransfer<VectorType, BitVectorType, MatrixType>:: +galerkinRestrict(const MatrixType& fineMat, MatrixType& coarseMat, const BitVectorType& critical) const { - if (this->recompute_ != NULL && this->recompute_->size() != (unsigned int)this->matrix_->M()) DUNE_THROW(Dune::Exception, "The size of the recompute_-bitfield doesn't match the " << "size of the coarse grid space!"); @@ -109,7 +97,7 @@ galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat, // //////////////////////// // Nonsymmetric case // //////////////////////// - typedef typename OperatorType::row_type RowType; + typedef typename MatrixType::row_type RowType; typedef typename RowType::Iterator ColumnIterator; typedef typename RowType::ConstIterator ConstColumnIterator; @@ -122,31 +110,28 @@ galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat, 
for (size_t i=0; i<coarseMat.N(); i++) { RowType& row = coarseMat[i]; - + // Loop over all columns of the stiffness matrix ColumnIterator m = row.begin(); ColumnIterator mEnd = row.end(); - + for (; m!=mEnd; ++m) { - + if ((*this->recompute_)[i][0] || (*this->recompute_)[m.index()][0]) *m = 0; - } - } - } // Loop over all rows of the stiffness matrix for (size_t v=0; v<fineMat.N(); v++) { - + const RowType& row = fineMat[v]; // Loop over all columns of the stiffness matrix ConstColumnIterator m = row.begin(); ConstColumnIterator mEnd = row.end(); - + for (; m!=mEnd; ++m) { int w = m.index(); @@ -161,33 +146,29 @@ galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat, // Loop over all coarse grid vectors jv that have w in their support TransferConstColumnIterator jm = (*this->matrix_)[w].begin(); TransferConstColumnIterator jmEnd = (*this->matrix_)[w].end(); - + for (; jm!=jmEnd; ++jm) { - + int jv = jm.index(); if (this->recompute_ && !((*this->recompute_)[iv][0]) && !((*this->recompute_)[jv][0])) continue; - - typename OperatorType::block_type& cm = coarseMat[iv][jv]; - + + typename MatrixType::block_type& cm = coarseMat[iv][jv]; + // Compute im * m * jm, but omitting the critical entries for (int i=0; i<blocksize; i++) { - + for (int j=0; j<blocksize; j++) { - + // Truncated Multigrid: Omit coupling if at least // one of the two vectors is critical if (!critical[v][i] && !critical[w][j]) cm[i][j] += (*im)[0][0] * (*m)[i][j] * (*jm)[0][0]; - } - } - } } } } - } diff --git a/dune/solvers/transferoperators/truncatedmgtransfer.hh b/dune/solvers/transferoperators/truncatedmgtransfer.hh index f6b6653a08c2347b43bbaf0785063b35ddc60fdb..6ca2a0097a2a193f8566d151d424494fb4ff5f75 100644 --- a/dune/solvers/transferoperators/truncatedmgtransfer.hh +++ b/dune/solvers/transferoperators/truncatedmgtransfer.hh @@ -58,7 +58,7 @@ public: MatrixType& coarseMat, const BitVectorType& critical) const = 0; - /** \brief Bitfield specifying a subsets of dofs which need to be recomputed + /** \brief Bitfield specifying a subset of dofs which need to be recomputed * when doing Galerkin restriction * * If this pointer points to NULL it has no effect. If not it is expected
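
USAGE SKETCHES
--------------
--------------

* Required code change for DiagNorm/FullNorm: a minimal sketch of how the templated norms can be used after this backport. The default template arguments reproduce the old scalar behaviour, so existing code only needs the extra "<>". The sizes, entry values and the function name below are illustrative assumptions, not part of the library.

    #include <vector>
    #include <dune/common/fvector.hh>
    #include <dune/common/fmatrix.hh>
    #include <dune/istl/bvector.hh>
    #include <dune/solvers/norms/diagnorm.hh>
    #include <dune/solvers/norms/fullnorm.hh>

    double normUsageSketch()
    {
        // Scalar case: the defaults keep the old behaviour, only "<>" is new.
        typedef Dune::BlockVector<Dune::FieldVector<double,1> > ScalarVector;
        ScalarVector d(10), v(10);
        d = 1.0;
        v = 2.0;
        DiagNorm<> scalarDiagNorm(1.0, d);   // was: DiagNorm scalarDiagNorm(1.0, d);
        FullNorm<> scalarFullNorm(1.0, d);   // was: FullNorm scalarFullNorm(1.0, d);
        double r0 = scalarDiagNorm(v);
        double r1 = scalarFullNorm(v);

        // Block case: any container with operator[] may hold the diagonal blocks.
        typedef Dune::FieldMatrix<double,2,2> Block;
        typedef Dune::BlockVector<Dune::FieldVector<double,2> > BlockVector2;
        std::vector<Block> diag(10);
        for (std::size_t i=0; i<diag.size(); ++i)
        {
            diag[i] = 0.0;                           // zero the block ...
            diag[i][0][0] = diag[i][1][1] = 1.0;     // ... and put ones on its diagonal
        }
        BlockVector2 w(10);
        w = 1.0;
        DiagNorm<std::vector<Block>, BlockVector2> blockDiagNorm(1.0, diag);
        double r2 = blockDiagNorm(w);

        return r0 + r1 + r2;
    }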
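
* Arithmetic::addProduct from the new dune/solvers/common/arithmetic.hh computes a += b*c for meaningful combinations of scalars, vectors and matrices; the truncated block Gauss-Seidel specialization uses it to accumulate the m^t m contributions to the diagonal block. A short sketch assuming plain FieldMatrix/FieldVector arguments (the function name is made up for illustration):

    #include <dune/common/fvector.hh>
    #include <dune/common/fmatrix.hh>
    #include <dune/solvers/common/arithmetic.hh>

    void addProductSketch()
    {
        // Matrix * matrix: a += b*c with matching FieldMatrix dimensions.
        Dune::FieldMatrix<double,2,2> a;
        Dune::FieldMatrix<double,2,3> b;
        Dune::FieldMatrix<double,3,2> c;
        a = 0.0;
        b = 1.0;
        c = 2.0;
        Arithmetic::addProduct(a, b, c);       // every entry of a is now 6.0

        // "Scalar" factor b: a 1-dim vector (or 1x1 matrix) simply scales c.
        Dune::FieldVector<double,3> x(0.0), y(1.0);
        Dune::FieldVector<double,1> alpha(0.5);
        Arithmetic::addProduct(x, alpha, y);   // x += 0.5 * y
    }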
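
* SumOperator and the TruncatedBlockGSStep specialization: a sketch following the pattern of the extended sumoperatortest.cc. The block size, the concrete matrix entries, the direct use of the ignoreNodes_ pointer inherited from the iteration step base class, and the availability of StaticMatrix::Promote for plain FieldMatrix blocks are assumptions made for illustration.

    #include <dune/common/fvector.hh>
    #include <dune/common/fmatrix.hh>
    #include <dune/common/bitsetvector.hh>
    #include <dune/istl/bcrsmatrix.hh>
    #include <dune/istl/bvector.hh>
    #include <dune/istl/matrix.hh>
    #include <dune/solvers/operators/lowrankoperator.hh>
    #include <dune/solvers/operators/sumoperator.hh>
    #include <dune/solvers/iterationsteps/truncatedblockgsstep.hh>

    void sumOperatorGSSketch()
    {
        typedef Dune::FieldMatrix<double,2,2>                    Block;
        typedef Dune::BCRSMatrix<Block>                          SparseMatrix;
        typedef Dune::Matrix<Block>                              LowRankFactor;
        typedef LowRankOperator<LowRankFactor>                   LowRankMatrix;
        typedef SumOperator<SparseMatrix,LowRankMatrix>          Operator;
        typedef Dune::BlockVector<Dune::FieldVector<double,2> >  Vector;

        const size_t n = 4;

        // sparse summand A: block-diagonal matrix with 2*identity blocks
        SparseMatrix sparseMatrix(n, n, n, SparseMatrix::row_wise);
        for (SparseMatrix::CreateIterator it = sparseMatrix.createbegin(); it != sparseMatrix.createend(); ++it)
            it.insert(it.index());
        for (size_t i=0; i<n; ++i)
        {
            sparseMatrix[i][i] = 0.0;
            sparseMatrix[i][i][0][0] = sparseMatrix[i][i][1][1] = 2.0;
        }

        // low-rank summand M = m^t m, here with a factor m of one block row
        LowRankFactor factor(1, n);
        for (size_t j=0; j<n; ++j)
            factor[0][j] = 1.0;

        // a default-constructed SumOperator owns its summands; fill them
        // through the non-const getters sparseMatrix() and lowRankMatrix()
        Operator op;
        op.sparseMatrix() = sparseMatrix;
        op.lowRankMatrix().lowRankFactor() = factor;

        Vector x(n), rhs(n), b(n);
        x = 1.0;
        rhs = 1.0;
        b = 0.0;
        op.umv(x, b);    // b += (A + m^t m) x, as exercised in sumoperatortest.cc

        // one truncated block Gauss-Seidel sweep for (A + m^t m) x = rhs
        Dune::BitSetVector<2> ignore(n, false);
        TruncatedBlockGSStep<Operator, Vector, Dune::BitSetVector<2> > gsStep(op, x, rhs);
        gsStep.ignoreNodes_ = &ignore;   // pointer member inherited from the iteration step base class
        gsStep.iterate();
    }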
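
* Why restricting only the low-rank factor suffices in the CompressedMultigridTransfer specialization: with the transfer matrix P stored in matrix_ (fine-by-coarse, prolongation t = P f) and the fine factor m_f, the Galerkin coarse operator of the low-rank part factorizes again:

    \f$ M_c = P^t M_f P = P^t m_f^t m_f P = (m_f P)^t (m_f P) = m_c^t m_c \f$, with \f$ m_c = m_f P \f$.

  Entry-wise this reads m_c[k][j] += P[i][j] * m_f[k][i] over all fine indices i with P[i][j] != 0, which is exactly what the loop calling StaticMatrix::axpy(lr_coarse_factor[k][j], *j_it, lr_fine_factor[k][i]) in galerkinRestrict accumulates; galerkinRestrictSetOccupation accordingly only has to resize the coarse factor to (rank of m_f) x matrix_->M().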