// -*- tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=8 sw=4 sts=4:
#ifndef GENERIC_MULTIGRID_TRANSFER_HH
#define GENERIC_MULTIGRID_TRANSFER_HH

#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/matrixindexset.hh>
#include <dune/common/fmatrix.hh>
#include <dune/common/bitsetvector.hh>

#include <dune/geometry/referenceelements.hh>

#include <dune/localfunctions/lagrange/pqkfactory.hh>

#include "dune/solvers/common/staticmatrixtools.hh"
#include <dune/solvers/common/arithmetic.hh>


/** \brief Restriction and prolongation operator for standard multigrid
 *
 * This class implements the standard prolongation and restriction
 * operators for standard multigrid solvers.  Restriction and prolongation
 * of block vectors is provided.  Internally, the operator is stored
 * as a BCRSMatrix.  Therefore, the vector type DiscFuncType handed to
 * the transfer methods has to comply with the ISTL interface requirements.
 *
 * \todo Currently only works for first-order Lagrangian elements!
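 *
 * A minimal usage sketch (illustrative only: the matrix and vector types, the
 * grid object, and the level numbers are assumptions, not prescribed by this class):
 * \code
 * typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > TransferOperatorType;
 * typedef Dune::BlockVector<Dune::FieldVector<double,1> >  VectorType;
 *
 * TransferOperatorType transfer;
 * GenericMultigridTransfer::setup<TransferOperatorType,GridType,double>(transfer, grid, level, level+1);
 *
 * VectorType coarse(transfer.M()), fine;
 * GenericMultigridTransfer::prolong(transfer, coarse, fine);   // fine   = transfer * coarse
 * GenericMultigridTransfer::restrict(transfer, fine, coarse);  // coarse = transfer^T * fine
 * \endcode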
 */
//template<class DiscFuncType, class MatrixBlock, class TransferOperatorType>
class GenericMultigridTransfer {

public:

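    // Helper functions for applying a single transfer matrix block:
    // umv adds A*x to y, and umtv adds A^T*x to y.  The overloads taking a
    // 1x1 FieldMatrix allow a scalar transfer weight to act on vector-valued
    // blocks, e.g. when a scalar prolongation matrix is applied to a
    // vector-valued discrete function.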
    template<class MatrixType, class VectorType>
    static void umv(const MatrixType& A, const VectorType& x, VectorType& y)
    {
        A.umv(x,y);
    }

    template<class VectorType>
    static void umv(const Dune::FieldMatrix<typename VectorType::field_type,1,1>& A, const VectorType& x, VectorType& y)
    {
        y.axpy(A[0][0], x);
    }

    template<class MatrixType, class VectorType>
    static void umtv(const MatrixType& A, const VectorType& x, VectorType& y)
    {
        A.umtv(x,y);
    }

    template<class VectorType>
    static void umtv(const Dune::FieldMatrix<typename VectorType::field_type,1,1>& A, const VectorType& x, VectorType& y)
    {
        y.axpy(A[0][0], x);
    }

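    // Returns true if the j-th diagonal entry of the block A is (numerically) one.
    // This identifies prolongation weights of value 1, i.e. fine dofs that coincide
    // with a coarse dof ('father' dofs).  If the block has fewer than j+1 rows
    // (in particular for 1x1 blocks), the single entry A[0][0] is tested instead.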
    template<class MatrixType>
    static bool diagonalIsOne(const MatrixType& A, unsigned int j)
    {
        if (j>=A.N())
            return A[0][0]>1-1e-5;
        return A[j][j]>1-1e-5;
    }


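    /** \brief Set up the prolongation matrix between two consecutive levels of a grid
     *
     * The matrix is assembled in two passes: first the occupation pattern is
     * determined by checking which coarse basis functions are nonzero at the
     * Lagrange nodes of the fine level elements, then the corresponding
     * interpolation weights are written into the matrix.
     *
     * \param matrix The matrix object to assemble into
     * \param grid The grid
     * \param cL The coarse grid level
     * \param fL The fine grid level; must be cL+1
     */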
    template<class TransferOperatorType, class GridType, class field_type>
    static void setup(TransferOperatorType& matrix, const GridType& grid, int cL, int fL)
    {
        typedef typename TransferOperatorType::block_type TransferMatrixBlock;
        typedef typename GridType::ctype ctype;

        if (fL != cL+1)
            DUNE_THROW(Dune::Exception, "The two function spaces don't belong to consecutive levels!");

        const int dim = GridType::dimension;

        const typename GridType::Traits::LevelIndexSet& coarseIndexSet = grid.levelIndexSet(cL);
        const typename GridType::Traits::LevelIndexSet& fineIndexSet   = grid.levelIndexSet(fL);

        int rows = grid.size(fL, dim);
        int cols = grid.size(cL, dim);

        // A factory for the shape functions
        typedef typename Dune::PQkLocalFiniteElementCache<ctype, field_type, dim, 1> P1FECache;
        typedef typename P1FECache::FiniteElementType FEType;
        P1FECache p1FECache;

        matrix.setSize(rows,cols);
        matrix.setBuildMode(TransferOperatorType::random);

        typedef typename GridType::template Codim<0>::LevelIterator ElementIterator;

        typename GridType::LevelGridView levelView = grid.levelGridView(cL);
        ElementIterator cIt    = levelView.template begin<0>();
        ElementIterator cEndIt = levelView.template end<0>();


        // ///////////////////////////////////////////
        // Determine the indices present in the matrix
        // /////////////////////////////////////////////////
        Dune::MatrixIndexSet indices(rows, cols);

        for (; cIt != cEndIt; ++cIt) {

            typedef typename GridType::template Codim<0>::Entity EntityType;
            typedef typename EntityType::HierarchicIterator HierarchicIterator;

            // Get local finite element
            const FEType& coarseBaseSet = p1FECache.get(cIt->type());

            const size_t numCoarseBaseFct = coarseBaseSet.localBasis().size();

            // preallocate vector for function evaluations
            std::vector<Dune::FieldVector<field_type,1> > values(numCoarseBaseFct);

            HierarchicIterator fIt    = cIt->hbegin(fL);
            HierarchicIterator fEndIt = cIt->hend(fL);

            for (; fIt != fEndIt; ++fIt) {

                if (fIt->level()==cIt->level())
                    continue;

                const Dune::ReferenceElement<ctype,dim>& fineRefElement
                    = Dune::ReferenceElements<ctype, dim>::general(fIt->type());

                const typename EntityType::LocalGeometry& fGeometryInFather = fIt->geometryInFather();

                // Get local finite element
                const FEType& fineBaseSet = p1FECache.get(fIt->type());

                const size_t numFineBaseFct = fineBaseSet.localBasis().size();


                for (size_t j=0; j<numFineBaseFct; j++)
                {
                    const Dune::LocalKey& jLocalKey = fineBaseSet.localCoefficients().localKey(j);

                    int globalFine = fineIndexSet.subIndex(*fIt, jLocalKey.subEntity(), jLocalKey.codim());

                    Dune::FieldVector<ctype, dim> fineBasePosition = fineRefElement.position(jLocalKey.subEntity(), jLocalKey.codim());
                    Dune::FieldVector<ctype, dim> local = fGeometryInFather.global(fineBasePosition);

                    // Evaluate coarse grid base functions
                    coarseBaseSet.localBasis().evaluateFunction(local, values);

                    for (size_t i=0; i<numCoarseBaseFct; i++)
                    {
                        if (values[i] > 0.001)
                        {
                            const Dune::LocalKey& iLocalKey = coarseBaseSet.localCoefficients().localKey(i);
                            int globalCoarse = coarseIndexSet.subIndex(*cIt, iLocalKey.subEntity(), iLocalKey.codim());
                            indices.add(globalFine, globalCoarse);
                        }
                    }
                }

            }

        }

        indices.exportIdx(matrix);

        // /////////////////////////////////////////////
        // Compute the matrix
        // /////////////////////////////////////////////
        cIt    = levelView.template begin<0>();
        for (; cIt != cEndIt; ++cIt) {

            // Get local finite element
            const FEType& coarseBaseSet = p1FECache.get(cIt->type());

            const size_t numCoarseBaseFct = coarseBaseSet.localBasis().size();

            typedef typename GridType::template Codim<0>::Entity EntityType;
            typedef typename EntityType::HierarchicIterator HierarchicIterator;

            // preallocate vector for function evaluations
            std::vector<Dune::FieldVector<field_type,1> > values(numCoarseBaseFct);

            HierarchicIterator fIt    = cIt->hbegin(fL);
            HierarchicIterator fEndIt = cIt->hend(fL);

            for (; fIt != fEndIt; ++fIt) {

                if (fIt->level()==cIt->level())
                    continue;

                const Dune::ReferenceElement<ctype,dim>& fineRefElement
                    = Dune::ReferenceElements<ctype, dim>::general(fIt->type());

                const typename EntityType::LocalGeometry& fGeometryInFather = fIt->geometryInFather();

                // Get local finite element
                const FEType& fineBaseSet = p1FECache.get(fIt->type());

                const size_t numFineBaseFct = fineBaseSet.localBasis().size();

                for (size_t j=0; j<numFineBaseFct; j++)
                {
                    const Dune::LocalKey& jLocalKey = fineBaseSet.localCoefficients().localKey(j);

                    int globalFine = fineIndexSet.subIndex(*fIt, jLocalKey.subEntity(), jLocalKey.codim());

                    Dune::FieldVector<ctype, dim> fineBasePosition = fineRefElement.position(jLocalKey.subEntity(), jLocalKey.codim());
                    Dune::FieldVector<ctype, dim> local = fGeometryInFather.global(fineBasePosition);

                    // Evaluate coarse grid base functions
                    coarseBaseSet.localBasis().evaluateFunction(local, values);

                    for (size_t i=0; i<numCoarseBaseFct; i++)
                    {
                        if (values[i] > 0.001)
                        {
                            const Dune::LocalKey& iLocalKey = coarseBaseSet.localCoefficients().localKey(i);
                            int globalCoarse = coarseIndexSet.subIndex(*cIt, iLocalKey.subEntity(), iLocalKey.codim());

                            matrix[globalFine][globalCoarse] = Dune::ScaledIdentityMatrix<ctype,TransferMatrixBlock::rows>(values[i]);
                        }
                    }
                }
            }

        }

    }


    /** \brief Set up the transfer operator between two arbitrary grid views of the same grid

       The 'fine' grid view gvFine must be a refinement of the 'coarse' grid view gvCoarse.

       \param matrix The matrix object to assemble into
       \param gvCoarse The coarse grid view
       \param gvFine The fine grid view
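
       A usage sketch (illustrative; all four template arguments have to be given
       explicitly because the field type cannot be deduced from the arguments):
       \code
       GenericMultigridTransfer::setup<TransferOperatorType,
                                       GridType::LevelGridView,
                                       GridType::LeafGridView,
                                       double>(transfer, grid.levelGridView(0), grid.leafGridView());
       \endcode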
     */
    template<class TransferOperatorType, class GridViewCoarse, class GridViewFine, class field_type>
    static void setup(TransferOperatorType& matrix, const GridViewCoarse& gvCoarse, const GridViewFine& gvFine)
    {
        typedef typename TransferOperatorType::block_type TransferMatrixBlock;
        typedef typename GridViewCoarse::ctype ctype;
        typedef typename GridViewCoarse::Grid GridType;

        const int dim = GridViewCoarse::dimension;

        const typename GridViewCoarse::IndexSet& coarseIndexSet = gvCoarse.indexSet();
        const typename GridViewFine::IndexSet&   fineIndexSet   = gvFine.indexSet();

        int rows = gvFine.size(dim);
        int cols = gvCoarse.size(dim);

        // A factory for the shape functions
        typedef typename Dune::PQkLocalFiniteElementCache<ctype, field_type, dim, 1> P1FECache;
        typedef typename P1FECache::FiniteElementType FEType;
        P1FECache p1FECache;

        matrix.setSize(rows,cols);
        matrix.setBuildMode(TransferOperatorType::random);

        typedef typename GridViewFine::template Codim<0>::Iterator ElementIterator;

        ElementIterator fIt    = gvFine.template begin<0>();
        ElementIterator fEndIt = gvFine.template end<0>();


        // ///////////////////////////////////////////
        // Determine the indices present in the matrix
        // /////////////////////////////////////////////////
        Dune::MatrixIndexSet indices(rows, cols);

        for (; fIt != fEndIt; ++fIt) {

            const Dune::ReferenceElement<ctype,dim>& fineRefElement
                = Dune::ReferenceElements<ctype, dim>::general(fIt->type());

            // Get local finite element
            const FEType& fineBaseSet = p1FECache.get(fIt->type());;

            const size_t numFineBaseFct = fineBaseSet.localBasis().size();

            std::vector<Dune::FieldVector<ctype,dim> > local(numFineBaseFct);

            for (size_t i=0; i<numFineBaseFct; i++)
            {
                const Dune::LocalKey& iLocalKey = fineBaseSet.localCoefficients().localKey(i);
                local[i] = fineRefElement.position(iLocalKey.subEntity(), iLocalKey.codim());
            }

            // Get ancestor element in the coarse grid view, and the local position there.
            typename GridViewFine::template Codim<0>::EntityPointer ancestor(fIt);

            while (not gvCoarse.contains(*ancestor) && ancestor->level() != 0 ) {

                typename GridType::template Codim<0>::LocalGeometry geometryInFather = ancestor->geometryInFather();

                for (size_t i=0; i<numFineBaseFct; i++)
                    local[i] = geometryInFather.global(local[i]);
                ancestor = ancestor->father();

            }

            assert(gvCoarse.contains(*ancestor));

            for (size_t j=0; j<numFineBaseFct; j++)
            {
                // Get local finite element
                const FEType& coarseBaseSet = p1FECache.get(ancestor->type());

                const size_t numCoarseBaseFct = coarseBaseSet.localBasis().size();

                // preallocate vector for function evaluations
                std::vector<Dune::FieldVector<field_type,1> > values(numCoarseBaseFct);

                // Evaluate coarse grid base functions
                coarseBaseSet.localBasis().evaluateFunction(local[j], values);

                const Dune::LocalKey& jLocalKey = fineBaseSet.localCoefficients().localKey(j);
                int globalFine = fineIndexSet.subIndex(*fIt, jLocalKey.subEntity(), jLocalKey.codim());

                for (size_t i=0; i<numCoarseBaseFct; i++)
                {
                    if (values[i] > 0.001)
                    {
                        const Dune::LocalKey& iLocalKey = coarseBaseSet.localCoefficients().localKey(i);
                        int globalCoarse = coarseIndexSet.subIndex(*ancestor, iLocalKey.subEntity(), iLocalKey.codim());
                        indices.add(globalFine, globalCoarse);
                    }
                }
            }

        }

        indices.exportIdx(matrix);

        // /////////////////////////////////////////////
        // Compute the matrix
        // /////////////////////////////////////////////
        for (fIt = gvFine.template begin<0>(); fIt != fEndIt; ++fIt) {


            const Dune::ReferenceElement<ctype,dim>& fineRefElement
                = Dune::ReferenceElements<ctype, dim>::general(fIt->type());

            // Get local finite element
            const FEType& fineBaseSet = p1FECache.get(fIt->type());;

            const size_t numFineBaseFct = fineBaseSet.localBasis().size();

            std::vector<Dune::FieldVector<ctype,dim> > local(numFineBaseFct);

            for (size_t i=0; i<numFineBaseFct; i++)
            {
                const Dune::LocalKey& iLocalKey = fineBaseSet.localCoefficients().localKey(i);
                local[i] = fineRefElement.position(iLocalKey.subEntity(), iLocalKey.codim());
            }

            // Get ancestor element in the coarse grid view, and the local position there.
            typename GridViewFine::template Codim<0>::EntityPointer ancestor(fIt);

            while (not gvCoarse.contains(*ancestor) && ancestor->level() != 0 ) {

                typename GridType::template Codim<0>::LocalGeometry geometryInFather = ancestor->geometryInFather();

                for (size_t i=0; i<numFineBaseFct; i++)
                    local[i] = geometryInFather.global(local[i]);
                ancestor = ancestor->father();

            }

            assert(gvCoarse.contains(*ancestor));

            for (size_t j=0; j<numFineBaseFct; j++)
            {
                // Get local finite element
                const FEType& coarseBaseSet = p1FECache.get(ancestor->type());

                const size_t numCoarseBaseFct = coarseBaseSet.localBasis().size();

                // preallocate vector for function evaluations
                std::vector<Dune::FieldVector<field_type,1> > values(numCoarseBaseFct);

                // Evaluate coarse grid base functions
                coarseBaseSet.localBasis().evaluateFunction(local[j], values);

                const Dune::LocalKey& jLocalKey = fineBaseSet.localCoefficients().localKey(j);
                int globalFine = fineIndexSet.subIndex(*fIt, jLocalKey.subEntity(), jLocalKey.codim());

                for (size_t i=0; i<numCoarseBaseFct; i++)
                {
                    if (values[i] > 0.001)
                    {
                        const Dune::LocalKey& iLocalKey = coarseBaseSet.localCoefficients().localKey(i);
                        int globalCoarse = coarseIndexSet.subIndex(*ancestor, iLocalKey.subEntity(), iLocalKey.codim());

                        matrix[globalFine][globalCoarse] = Dune::ScaledIdentityMatrix<ctype,TransferMatrixBlock::rows>(values[i]);
                    }
                }
            }

        }

    }

    //! Multiply the prolongation matrix by the coarse grid vector f from the right, i.e. compute t = matrix * f
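    //! If virtualBlockSize is larger than one, each entry of the transfer matrix acts
    //! on virtualBlockSize consecutive entries of the vectors; a negative value makes
    //! the block size be deduced as f.size()/matrix.M().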
    template<class TransferOperatorType, class DiscFuncType>
    static void prolong(const TransferOperatorType& matrix, const DiscFuncType& f, DiscFuncType &t, int virtualBlockSize = 1)
    {
        if (virtualBlockSize<0)
            virtualBlockSize = f.size()/matrix.M();

        if (f.size() != matrix.M()*virtualBlockSize)
            DUNE_THROW(Dune::Exception, "Number of entries in the coarse grid vector is not equal "
                    << "to the number of columns of the interpolation matrix!");

        t.resize(matrix.N()*virtualBlockSize);

        typedef typename TransferOperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        for(size_t rowIdx=0; rowIdx<matrix.N(); rowIdx++)
        {
            const RowType& row = matrix[rowIdx];

            for(int i=0; i<virtualBlockSize; ++i)
                t[rowIdx*virtualBlockSize+i] = 0.0;

            ColumnIterator cIt    = row.begin();
            ColumnIterator cEndIt = row.end();

            for(; cIt!=cEndIt; ++cIt)
            {
                for(int i=0; i<virtualBlockSize; ++i)
                    umv(*cIt, f[cIt.index()*virtualBlockSize+i], t[rowIdx*virtualBlockSize+i]);
            }
        }
    }


    //! Multiply the transpose of the prolongation matrix by the fine grid vector f from the right, i.e. compute t = matrix^T * f
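    //! The parameter virtualBlockSize has the same meaning as in prolong; a negative
    //! value makes the block size be deduced as f.size()/matrix.N().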
    template<class TransferOperatorType, class DiscFuncType>
    static void restrict(const TransferOperatorType& matrix, const DiscFuncType& f, DiscFuncType &t, int virtualBlockSize = 1)
    {
        if (virtualBlockSize<0)
            virtualBlockSize = f.size()/matrix.N();

        if (f.size() != matrix.N()*virtualBlockSize)
            DUNE_THROW(Dune::Exception, "Fine grid vector has " << f.size() << " entries "
                    << "but the interpolation matrix has " << matrix.N() << " rows!");

        t.resize(matrix.M()*virtualBlockSize);
        t = 0;

        typedef typename TransferOperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        for (size_t rowIdx=0; rowIdx<matrix.N(); rowIdx++)
        {
            const RowType& row = matrix[rowIdx];

            ColumnIterator cIt    = row.begin();
            ColumnIterator cEndIt = row.end();

            for(; cIt!=cEndIt; ++cIt)
            {
                for(int i=0; i<virtualBlockSize; ++i)
                    umtv(*cIt, f[rowIdx*virtualBlockSize+i], t[cIt.index()*virtualBlockSize+i]);
            }
        }
    }


    //! Restrict a scalar bitfield from the fine onto the coarse grid: a coarse bit is set if any fine dof in the support of its basis function has its bit set
    template<class TransferOperatorType>
    static void restrictScalarBitField(const TransferOperatorType& matrix, const Dune::BitSetVector<1>& f, Dune::BitSetVector<1>& t)
    {
        if (f.size() != (unsigned int)matrix.N())
            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
                    << "but the interpolation matrix has " << matrix.N() << " rows!");

        t.resize(matrix.M());
        t.unsetAll();

        typedef typename TransferOperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        for (size_t rowIdx=0; rowIdx<matrix.N(); rowIdx++) {

            if (!f[rowIdx][0])
                continue;

            const RowType& row = matrix[rowIdx];

            ColumnIterator cIt    = row.begin();
            ColumnIterator cEndIt = row.end();

            for(; cIt!=cEndIt; ++cIt)
                t[cIt.index()] = true;
        }
    }


    //! Restrict a scalar bitfield from the fine onto the coarse grid (version with virtual block structure)
    template<class TransferOperatorType>
    static void restrictScalarBitField(const TransferOperatorType& matrix, const Dune::BitSetVector<1>& f, Dune::BitSetVector<1>& t, int virtualBlockSize)
    {
        if (virtualBlockSize<0)
            virtualBlockSize = f.size()/matrix.N();

        if (f.size() != (unsigned int)matrix.N()*virtualBlockSize)
            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
                    << "but the interpolation matrix has " << matrix.N() << " rows!");

        t.resize(matrix.M()*virtualBlockSize);
        t.unsetAll();

        typedef typename TransferOperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        for (size_t rowIdx=0; rowIdx<matrix.N(); rowIdx++) {

            // Check whether any bit of this virtual block is set
            bool someBitSet = false;
            for(int i=0; i<virtualBlockSize; ++i)
            {
                if (f[rowIdx*virtualBlockSize+i][0])
                {
                    someBitSet = true;
                    break;
                }
            }
            if (not(someBitSet))
                continue;

            const RowType& row = matrix[rowIdx];

            ColumnIterator cIt    = row.begin();
            ColumnIterator cEndIt = row.end();

            for(; cIt!=cEndIt; ++cIt)
            {
                for(int i=0; i<virtualBlockSize; ++i)
                {
                    if (f[rowIdx*virtualBlockSize+i][0])
                        t[cIt.index()*virtualBlockSize+i] = true;
                }
            }
        }
    }


    //! Restrict a vector-valued bitfield from the fine onto the coarse grid
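    //! If onlyToFathers is true, a set bit is propagated only to those coarse dofs whose
    //! prolongation weight for the fine dof is (numerically) one, i.e. to coarse dofs
    //! that the fine dof coincides with.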
    template <class TransferOperatorType, class BitVectorType>
    static void restrictBitField(const TransferOperatorType& matrix, const BitVectorType& f, BitVectorType& t, bool onlyToFathers=false)
    {
        if (f.size() != matrix.N())
            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
                    << "but the interpolation matrix has " << matrix.N() << " rows!");

        t.resize(matrix.M());
        t.unsetAll();

        typedef typename TransferOperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        for (size_t rowIdx=0; rowIdx<matrix.N(); rowIdx++) {
            const typename BitVectorType::const_reference fineBits = f[rowIdx];

            if (fineBits.none())
                continue;

            const RowType& row = matrix[rowIdx];

            ColumnIterator cIt    = row.begin();
            ColumnIterator cEndIt = row.end();

            for(; cIt!=cEndIt; ++cIt)
            {
                for(size_t i=0; i<fineBits.size(); ++i)
                    if (fineBits.test(i))
                        if (!onlyToFathers || diagonalIsOne(*cIt, i))
                            t[cIt.index()][i] = true;
            }

        }

    }


    //! Restrict a vector-valued bitfield from the fine onto the coarse grid (version with virtual block structure)
    template <class TransferOperatorType, class BitVectorType>
    static void restrictBitField(const TransferOperatorType& matrix, const BitVectorType& f, BitVectorType& t, int virtualBlockSize, bool onlyToFathers=false)
    {
        if (virtualBlockSize<0)
            virtualBlockSize = f.size()/matrix.N();

        if (f.size() != (unsigned int)matrix.N()*virtualBlockSize)
            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
                    << "but the interpolation matrix has " << matrix.N() << " rows!");

        t.resize(matrix.M()*virtualBlockSize);
        t.unsetAll();

        typedef typename TransferOperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        for (size_t rowIdx=0; rowIdx<matrix.N(); rowIdx++) {

            // Check whether any bit of this virtual block is set
            bool someBitSet = false;
            for(int i=0; i<virtualBlockSize; ++i)
            {
                if (f[rowIdx*virtualBlockSize+i].any())
                {
                    someBitSet = true;
                    break;
                }
            }
            if (not(someBitSet))
                continue;

            const RowType& row = matrix[rowIdx];

            ColumnIterator cIt    = row.begin();
            ColumnIterator cEndIt = row.end();

            for(; cIt!=cEndIt; ++cIt)
            {
                for(int i=0; i<virtualBlockSize; ++i)
                {
                    const typename BitVectorType::const_reference fineBits = f[rowIdx*virtualBlockSize+i];
                    for(size_t j=0; j<fineBits.size(); ++j)
                        if (fineBits.test(j))
                            if (!onlyToFathers || diagonalIsOne(*cIt, j))
                                t[cIt.index()*virtualBlockSize+i][j] = true;
                }
            }

        }

    }


    //! Restrict a vector-valued bitfield from the fine onto the coarse grid, setting bits only for coarse dofs that coincide with marked fine dofs
    template <class TransferOperatorType, class BitVectorType>
    static void restrictBitFieldToFathers(const TransferOperatorType& matrix, const BitVectorType& f, BitVectorType& t)
    {
        restrictBitField(matrix, f, t, true);
    }


    //! Restrict a vector-valued bitfield from the fine onto the coarse grid, setting bits only for coarse dofs that coincide with marked fine dofs (version with virtual block structure)
    template <class TransferOperatorType, class BitVectorType>
    static void restrictBitFieldToFathers(const TransferOperatorType& matrix, const BitVectorType& f, BitVectorType& t, int virtualBlockSize)
    {
        restrictBitField(matrix, f, t, virtualBlockSize, true);
    }



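    /** \brief Galerkin restriction of a fine grid matrix:
     *         coarseMat = matrix^T * fineMat * matrix
     */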
    template<class TransferOperatorType, class FineMatrixType, class CoarseMatrixType>
    static void galerkinRestrict(const TransferOperatorType& matrix, const FineMatrixType& fineMat, CoarseMatrixType& coarseMat)
    {
        // ////////////////////////
        // Nonsymmetric case
        // ////////////////////////
        // Clear coarse matrix
        coarseMat = 0;

        // Loop over all rows of the stiffness matrix
        for (size_t v=0; v<fineMat.N(); v++)
        {
            const auto& row = fineMat[v];

            // Loop over all columns of the stiffness matrix
            auto m    = row.begin();
            auto mEnd = row.end();

            for (; m!=mEnd; ++m)
            {
                if (m->infinity_norm()==0)
                    continue;
                int w = m.index();

                // Loop over all coarse grid vectors iv that have v in their support
                auto im    = matrix[v].begin();
                auto imEnd = matrix[v].end();
                for (; im!=imEnd; ++im)
                {
                    int iv = im.index();

                    // Loop over all coarse grid vectors jv that have w in their support
                    auto jm = matrix[w].begin();
                    auto jmEnd = matrix[w].end();

                    for (; jm!=jmEnd; ++jm)
                    {
                        int jv = jm.index();

                        auto& cm = coarseMat[iv][jv];

                        // Compute cm = im^T * m * jm
                        if(TransferOperatorType::block_type::rows==1)
                            Arithmetic::addProduct(cm, (*im)[0][0] * (*jm)[0][0], *m);
                        else
                            StaticMatrix::addTransformedMatrix(cm, *im, *m, *jm);
                    }
                }
            }
        }
    }


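    /** \brief Galerkin restriction of a fine grid matrix, coarseMat = matrix^T * fineMat * matrix,
     *         where each entry of the transfer matrix acts on virtualBlockSize consecutive
     *         rows/columns of the fine matrix
     */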
    template<class TransferOperatorType, class OperatorType>
    static void galerkinRestrict(const TransferOperatorType& matrix, const OperatorType& fineMat, OperatorType& coarseMat, int virtualBlockSize)
    {
        if (virtualBlockSize<0)
            virtualBlockSize = fineMat.N()/matrix.N();

        // ////////////////////////
        // Nonsymmetric case
        // ////////////////////////
        typedef typename OperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        typedef typename TransferOperatorType::row_type TransferRowType;
        typedef typename TransferRowType::ConstIterator TransferColumnIterator;

        // Clear coarse matrix
        coarseMat = 0;

        // Loop over all rows of the stiffness matrix
        for (size_t v=0; v<fineMat.N(); v++)
        {
            int v_block = v/virtualBlockSize;
            int v_inblock = v%virtualBlockSize;

            const RowType& row = fineMat[v];

            // Loop over all columns of the stiffness matrix
            ColumnIterator m    = row.begin();
            ColumnIterator mEnd = row.end();

            for (; m!=mEnd; ++m)
            {
                if (m->infinity_norm()==0)
                    continue;
                int w = m.index();
                int w_block = w/virtualBlockSize;
                int w_inblock = w%virtualBlockSize;

                // Loop over all coarse grid vectors iv that have v in their support
                TransferColumnIterator im    = matrix[v_block].begin();
                TransferColumnIterator imEnd = matrix[v_block].end();
                for (; im!=imEnd; ++im)
                {
                    int iv = im.index()*virtualBlockSize+v_inblock;

                    // Loop over all coarse grid vectors jv that have w in their support
                    TransferColumnIterator jm = matrix[w_block].begin();
                    TransferColumnIterator jmEnd = matrix[w_block].end();

                    for (; jm!=jmEnd; ++jm)
                    {
                        int jv = jm.index()*virtualBlockSize+w_inblock;

                        typename OperatorType::block_type& cm = coarseMat[iv][jv];

                        // Compute cm = im^T * m * jm
                        if(TransferOperatorType::block_type::rows==1)
                            Arithmetic::addProduct(cm, (*im)[0][0] * (*jm)[0][0], *m);
                        else
                            StaticMatrix::addTransformedMatrix(cm, *im, *m, *jm);
                    }
                }
            }
        }
    }


    //! Set the occupation pattern (sparsity structure) of the Galerkin-restricted coarse stiffness matrix
    template<class TransferOperatorType, class OperatorType>
    static void galerkinRestrictSetOccupation(const TransferOperatorType& matrix, const OperatorType& fineMat, OperatorType& coarseMat)
    {
        if (fineMat.N() != matrix.N())
            DUNE_THROW(Dune::Exception, "Fine grid matrix has " << fineMat.N() << " rows "
                << "but the interpolation matrix has " << matrix.N() << " rows!");

        // ////////////////////////
        // Nonsymmetric case
        // ////////////////////////
        typedef typename OperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        typedef typename TransferOperatorType::row_type TransferRowType;
        typedef typename TransferRowType::ConstIterator TransferColumnIterator;

        // Create index set
        Dune::MatrixIndexSet indices(matrix.M(), matrix.M());

        // Loop over all rows of the fine matrix
        for (size_t v=0; v<fineMat.N(); v++)
        {
            const RowType& row = fineMat[v];

            // Loop over all columns of the fine matrix
            ColumnIterator m    = row.begin();
            ColumnIterator mEnd = row.end();

            for (; m!=mEnd; ++m)
            {
                int w = m.index();

                // Loop over all coarse grid vectors iv that have v in their support
                TransferColumnIterator im    = matrix[v].begin();
                TransferColumnIterator imEnd = matrix[v].end();
                for (; im!=imEnd; ++im)
                {
                    int iv = im.index();

                    // Loop over all coarse grid vectors jv that have w in their support
                    TransferColumnIterator jm    = matrix[w].begin();
                    TransferColumnIterator jmEnd = matrix[w].end();

                    for (; jm!=jmEnd; ++jm)
                        indices.add(iv, jm.index());
                }
            }
        }
        indices.exportIdx(coarseMat);
    }


    //! Set the occupation pattern (sparsity structure) of the Galerkin-restricted coarse stiffness matrix (version with virtual block structure)
    template<class TransferOperatorType, class OperatorType>
    static void galerkinRestrictSetOccupation(const TransferOperatorType& matrix, const OperatorType& fineMat, OperatorType& coarseMat, int virtualBlockSize)
    {
        if (fineMat.N() != matrix.N())
            DUNE_THROW(Dune::Exception, "Fine grid matrix has " << fineMat.N() << " rows "
                << "but the interpolation matrix has " << matrix.N() << " rows!");

        if (virtualBlockSize<0)
            virtualBlockSize = fineMat.N()/matrix.N();
        // ////////////////////////
        // Nonsymmetric case
        // ////////////////////////
        typedef typename OperatorType::row_type RowType;
        typedef typename RowType::ConstIterator ColumnIterator;

        typedef typename TransferOperatorType::row_type TransferRowType;
        typedef typename TransferRowType::ConstIterator TransferColumnIterator;

        // Create index set
        Dune::MatrixIndexSet indices(matrix.M()*virtualBlockSize, matrix.M()*virtualBlockSize);

        // Loop over all rows of the fine matrix
        for (size_t v=0; v<fineMat.N(); v++)
        {
            int v_block = v/virtualBlockSize;
            int v_inblock = v%virtualBlockSize;

            const RowType& row = fineMat[v];

            // Loop over all columns of the fine matrix
            ColumnIterator m    = row.begin();
            ColumnIterator mEnd = row.end();

            for (; m!=mEnd; ++m)
            {
                int w = m.index();
                int w_block = w/virtualBlockSize;
                int w_inblock = w%virtualBlockSize;

                // Loop over all coarse grid vectors iv that have v in their support
                TransferColumnIterator im    = matrix[v_block].begin();
                TransferColumnIterator imEnd = matrix[v_block].end();
                for (; im!=imEnd; ++im)
                {
                    int iv = im.index()*virtualBlockSize+v_inblock;

                    // Loop over all coarse grid vectors jv that have w in their support
                    TransferColumnIterator jm    = matrix[w_block].begin();
                    TransferColumnIterator jmEnd = matrix[w_block].end();

                    for (; jm!=jmEnd; ++jm)
                    {
                        int jv = jm.index()*virtualBlockSize+w_inblock;
                        indices.add(iv, jv);
                    }
                }
            }
        }
        indices.exportIdx(coarseMat);
    }

};

#endif