From 75de37c8d057c5053a9f1b6ecf6fe3b2ab75d98b Mon Sep 17 00:00:00 2001
From: Oliver Sander <sander@igpm.rwth-aachen.de>
Date: Mon, 6 Apr 2009 13:47:58 +0000
Subject: [PATCH] Transfer operators moved to new module dune-solvers

[[Imported from SVN: r2341]]
---
 .../compressedmultigridtransfer.hh            | 152 ++++
 .../densemultigridtransfer.hh                 | 129 +++
 .../genericmultigridtransfer.hh               | 846 ++++++++++++++++++
 .../transferoperators/multigridtransfer.hh    |  73 ++
 .../truncatedcompressedmgtransfer.cc          | 197 ++++
 .../truncatedcompressedmgtransfer.hh          |  82 ++
 .../truncateddensemgtransfer.cc               | 218 +++++
 .../truncateddensemgtransfer.hh               |  82 ++
 .../transferoperators/truncatedmgtransfer.hh  |  73 ++
 9 files changed, 1852 insertions(+)
 create mode 100644 dune-solvers/transferoperators/compressedmultigridtransfer.hh
 create mode 100644 dune-solvers/transferoperators/densemultigridtransfer.hh
 create mode 100644 dune-solvers/transferoperators/genericmultigridtransfer.hh
 create mode 100644 dune-solvers/transferoperators/multigridtransfer.hh
 create mode 100644 dune-solvers/transferoperators/truncatedcompressedmgtransfer.cc
 create mode 100644 dune-solvers/transferoperators/truncatedcompressedmgtransfer.hh
 create mode 100644 dune-solvers/transferoperators/truncateddensemgtransfer.cc
 create mode 100644 dune-solvers/transferoperators/truncateddensemgtransfer.hh
 create mode 100644 dune-solvers/transferoperators/truncatedmgtransfer.hh

diff --git a/dune-solvers/transferoperators/compressedmultigridtransfer.hh b/dune-solvers/transferoperators/compressedmultigridtransfer.hh
new file mode 100644
index 00000000..29d665ae
--- /dev/null
+++ b/dune-solvers/transferoperators/compressedmultigridtransfer.hh
@@ -0,0 +1,152 @@
+#ifndef COMPRESSED_MULTIGRID_TRANSFER_HH
+#define COMPRESSED_MULTIGRID_TRANSFER_HH
+
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/bitsetvector.hh>
+
+#include "genericmultigridtransfer.hh"
+#include "multigridtransfer.hh"
+
+
+/** \brief Restriction and prolongation operator for standard multigrid
+ *
+ * This class implements the standard prolongation and restriction
+ * operators for standard multigrid solvers.  Restriction and prolongation
+ * of block vectors are provided.  Internally, the operator is stored
+ * as a scalar BCRSMatrix.  Therefore, the template parameter VectorType
+ * has to comply with the ISTL requirements.
+ *
+ * \todo Currently only works for first-order Lagrangian elements!
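+ *
+ * A minimal usage sketch (the names grid, coarseX, fineX, fineR, coarseR
+ * are hypothetical, not part of this patch):
+ * \code
+ *   CompressedMultigridTransfer<VectorType> transfer;
+ *   transfer.setup(grid, k, k+1);       // transfer between levels k and k+1
+ *   transfer.prolong(coarseX, fineX);   // interpolate a coarse iterate
+ *   transfer.restrict(fineR, coarseR);  // restrict a fine residual
+ * \endcode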
+ */
+template<
+    class VectorType,
+    class BitVectorType = Dune::BitSetVector<VectorType::block_type::dimension>,
+    class MatrixType = Dune::BCRSMatrix< Dune::FieldMatrix<
+        typename VectorType::field_type, VectorType::block_type::size, VectorType::block_type::size> > >
+class CompressedMultigridTransfer :
+    public MultigridTransfer<VectorType,BitVectorType,MatrixType>
+{
+
+    enum {blocksize = VectorType::block_type::size};
+
+    typedef typename VectorType::field_type field_type;
+
+public:
+
+    typedef MatrixType OperatorType;
+
+    typedef Dune::BCRSMatrix<Dune::FieldMatrix<field_type,1,1> > TransferOperatorType;
+
+    CompressedMultigridTransfer()
+    {
+        matrix_ = new TransferOperatorType;
+        matrixInternallyAllocated = true;
+    }
+
+    virtual ~CompressedMultigridTransfer()
+    {
+        if (matrixInternallyAllocated)
+            delete matrix_;
+    }
+
+
+    /** \brief Sets up the operator between two P1 spaces
+     *
+     * \param grid The grid
+     * \param cL The coarse grid level
+     * \param fL The fine grid level
+     */
+    template <class GridType>
+    void setup(const GridType& grid, int cL, int fL)
+    {
+        GenericMultigridTransfer::setup<TransferOperatorType, GridType, field_type>(*matrix_, grid, cL, fL);
+    }
+
+
+    /** \brief Restrict a function from the fine onto the coarse grid
+     */
+    virtual void restrict(const VectorType& f, VectorType &t) const
+    {
+        GenericMultigridTransfer::restrict(*matrix_, f, t, -1);
+    }
+
+
+    /** \brief Restrict a bitfield from the fine onto the coarse grid
+     */
+    virtual void restrictScalarBitField(const Dune::BitSetVector<1>& f, Dune::BitSetVector<1>& t) const
+    {
+        GenericMultigridTransfer::restrictScalarBitField(*matrix_, f, t, -1);
+    }
+
+
+    /** \brief Restrict a vector valued bitfield from the fine onto the coarse grid
+     */
+    virtual void restrict(const BitVectorType& f, BitVectorType& t) const
+    {
+        GenericMultigridTransfer::restrictBitField(*matrix_, f, t, -1);
+    }
+
+
+    /** \brief Restrict a vector valued bitfield from the fine onto the coarse grid
+     * Fine bits only influence their father bits
+     */
+    virtual void restrictToFathers(const BitVectorType& f, BitVectorType& t) const
+    {
+        GenericMultigridTransfer::restrictBitFieldToFathers(*matrix_, f, t, -1);
+    }
+
+
+    /** \brief Prolong a function from the coarse onto the fine grid
+     */
+    virtual void prolong(const VectorType& f, VectorType &t) const
+    {
+        GenericMultigridTransfer::prolong(*matrix_, f, t, -1);
+    }
+
+
+    /** \brief Galerkin assemble a coarse stiffness matrix
+     */
+    virtual void galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat) const
+    {
+        GenericMultigridTransfer::galerkinRestrict(*matrix_, fineMat, coarseMat, -1);
+    }
+
+
+    /** \brief Set occupation of the Galerkin restricted coarse stiffness matrix
+    *
+    * Call this method before galerkinRestrict to ensure that all non-zero
+    * entries are present.
+    * \param fineMat The fine level matrix
+    * \param coarseMat The coarse level matrix
+    */
+    virtual void galerkinRestrictSetOccupation(const OperatorType& fineMat, OperatorType& coarseMat) const
+    {
+        GenericMultigridTransfer::galerkinRestrictSetOccupation(*matrix_, fineMat, coarseMat, -1);
+    }
+
+
+    /** \brief Direct access to the operator matrix, if you absolutely want it! */
+    const TransferOperatorType& getMatrix() const {
+        return *matrix_;
+    }
+
+    /** \brief Set matrix! */
+    void setMatrix(TransferOperatorType& matrix)
+    {
+        if (matrixInternallyAllocated)
+            delete matrix_;
+        matrixInternallyAllocated = false;
+        matrix_ = &matrix;
+    }
+
+
+
+protected:
+
+    TransferOperatorType* matrix_;
+    bool matrixInternallyAllocated;
+
+};
+
+#endif
diff --git a/dune-solvers/transferoperators/densemultigridtransfer.hh b/dune-solvers/transferoperators/densemultigridtransfer.hh
new file mode 100644
index 00000000..3a785e3c
--- /dev/null
+++ b/dune-solvers/transferoperators/densemultigridtransfer.hh
@@ -0,0 +1,129 @@
+#ifndef DENSE_MULTIGRID_TRANSFER_HH
+#define DENSE_MULTIGRID_TRANSFER_HH
+
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/bitsetvector.hh>
+
+#include "multigridtransfer.hh"
+#include "genericmultigridtransfer.hh"
+
+
+/** \brief Restriction and prolongation operator for standard multigrid
+ *
+ * This class implements the standard prolongation and restriction
+ * operators for standard multigrid solvers.  Restriction and prolongation
+ * of block vectors are provided.  Internally, the operator is stored
+ * as a BCRSMatrix of small dense matrices.
+ * Therefore, the template parameter VectorType has to comply with the ISTL requirements.
+ *
+ * \todo Currently only works for first-order Lagrangian elements!
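+ *
+ * In contrast to CompressedMultigridTransfer, which stores a single scalar
+ * per coarse/fine coupling, this class stores a full blocksize x blocksize
+ * matrix block per coupling.  This costs more memory, but in principle
+ * admits transfer operators that act differently on the individual
+ * components of a vector block.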
+ */
+template<
+    class VectorType,
+    class BitVectorType = Dune::BitSetVector<VectorType::block_type::dimension>,
+    class MatrixType = Dune::BCRSMatrix< Dune::FieldMatrix<
+        typename VectorType::field_type, VectorType::block_type::size, VectorType::block_type::size> > >
+class DenseMultigridTransfer :
+    public MultigridTransfer<VectorType,BitVectorType,MatrixType>
+{
+
+    enum {blocksize = VectorType::block_type::size};
+
+    typedef typename VectorType::field_type field_type;
+
+public:
+
+    typedef Dune::BCRSMatrix< Dune::FieldMatrix< field_type, blocksize, blocksize > > TransferOperatorType;
+
+    virtual ~DenseMultigridTransfer() {}
+
+
+    /** \brief Sets up the operator between two P1 spaces
+     *
+     * \param grid The grid
+     * \param cL The coarse grid level
+     * \param fL The fine grid level
+     */
+    template <class GridType>
+    void setup(const GridType& grid, int cL, int fL)
+    {
+        GenericMultigridTransfer::setup<TransferOperatorType, GridType, field_type>(matrix_, grid, cL, fL);
+    }
+
+
+    /** \brief Restrict a function from the fine onto the coarse grid
+     */
+    virtual void restrict(const VectorType& f, VectorType &t) const
+    {
+        GenericMultigridTransfer::restrict(matrix_, f, t);
+    }
+
+
+    /** \brief Restrict a bitfield from the fine onto the coarse grid
+     */
+    virtual void restrictScalarBitField(const Dune::BitSetVector<1>& f, Dune::BitSetVector<1>& t) const
+    {
+        GenericMultigridTransfer::restrictScalarBitField(matrix_, f, t);
+    }
+
+
+    /** \brief Restrict a vector valued bitfield from the fine onto the coarse grid
+     */
+    virtual void restrict(const BitVectorType& f, BitVectorType& t) const
+    {
+        GenericMultigridTransfer::restrictBitField(matrix_, f, t);
+    }
+
+
+    /** \brief Restrict a vector valued bitfield from the fine onto the coarse grid
+     * Fine bits only influence their father bits
+     */
+    virtual void restrictToFathers(const BitVectorType& f, BitVectorType& t) const
+    {
+        GenericMultigridTransfer::restrictBitFieldToFathers(matrix_, f, t);
+    }
+
+
+    /** \brief Prolong a function from the coarse onto the fine grid
+     */
+    virtual void prolong(const VectorType& f, VectorType &t) const
+    {
+        GenericMultigridTransfer::prolong(matrix_, f, t);
+    }
+
+
+    /** \brief Galerkin assemble a coarse stiffness matrix
+     */
+    virtual void galerkinRestrict(const MatrixType& fineMat, MatrixType& coarseMat) const
+    {
+        GenericMultigridTransfer::galerkinRestrict(matrix_, fineMat, coarseMat);
+    }
+
+
+    /** \brief Set occupation of the Galerkin restricted coarse stiffness matrix
+    *
+    * Call this method before galerkinRestrict to ensure that all non-zero
+    * entries are present.
+    * \param fineMat The fine level matrix
+    * \param coarseMat The coarse level matrix
+    */
+    virtual void galerkinRestrictSetOccupation(const MatrixType& fineMat, MatrixType& coarseMat) const
+    {
+        GenericMultigridTransfer::galerkinRestrictSetOccupation(matrix_, fineMat, coarseMat);
+    }
+
+
+    /** \brief Direct access to the operator matrix, if you absolutely want it! */
+    const TransferOperatorType& getMatrix() const {
+        return matrix_;
+    }
+
+
+protected:
+
+    TransferOperatorType matrix_;
+
+};
+
+#endif
diff --git a/dune-solvers/transferoperators/genericmultigridtransfer.hh b/dune-solvers/transferoperators/genericmultigridtransfer.hh
new file mode 100644
index 00000000..a63eaf48
--- /dev/null
+++ b/dune-solvers/transferoperators/genericmultigridtransfer.hh
@@ -0,0 +1,846 @@
+#ifndef GENERIC_MULTIGRID_TRANSFER_HH
+#define GENERIC_MULTIGRID_TRANSFER_HH
+
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/istl/matrixindexset.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/bitsetvector.hh>
+#include <dune/disc/shapefunctions/lagrangeshapefunctions.hh>
+
+#include "dune/ag-common/staticmatrixtools.hh"
+
+
+/** \brief Restriction and prolongation operator for standard multigrid
+ *
+ * This class implements the standard prolongation and restriction
+ * operators for standard multigrid solvers.  Restriction and prolongation
+ * of block vectors are provided.  Internally, the operator is stored
+ * as a BCRSMatrix.  Therefore, the template parameter DiscFuncType
+ * has to comply with the ISTL requirements.
+ *
+ * \todo Currently only works for first-order Lagrangian elements!
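+ *
+ * All methods are static.  A typical call sequence (sketch, with
+ * hypothetical names) is:
+ * \code
+ *   TransferOperatorType P;
+ *   GenericMultigridTransfer::setup<TransferOperatorType,GridType,field_type>(P, grid, k, k+1);
+ *   GenericMultigridTransfer::prolong(P, coarseX, fineX);
+ *   GenericMultigridTransfer::restrict(P, fineR, coarseR);
+ * \endcode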
+ */
+class GenericMultigridTransfer {
+
+
+
+public:
+
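+    //! Compute y += A*x.  The 1x1 overload below lets a scalar transfer
+    //! matrix act on vectors with larger blocks.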
+    template<class MatrixType, class VectorType>
+    static void umv(const MatrixType& A, const VectorType& x, VectorType& y)
+    {
+        A.umv(x,y);
+    }
+
+    template<class VectorType>
+    static void umv(const Dune::FieldMatrix<typename VectorType::field_type,1,1>& A, const VectorType& x, VectorType& y)
+    {
+        y.axpy(A[0][0], x);
+    }
+
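+    //! Compute y += A^T*x, with the same scalar specialization as umv.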
+    template<class MatrixType, class VectorType>
+    static void umtv(const MatrixType& A, const VectorType& x, VectorType& y)
+    {
+        A.umtv(x,y);
+    }
+
+    template<class VectorType>
+    static void umtv(const Dune::FieldMatrix<typename VectorType::field_type,1,1>& A, const VectorType& x, VectorType& y)
+    {
+        y.axpy(A[0][0], x);
+    }
+
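+    //! Check whether the j-th diagonal entry of the matrix block A is
+    //! (numerically) one.  For 1x1 blocks the single entry is tested,
+    //! independently of j.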
+    template<class MatrixType>
+    static bool diagonalIsOne(const MatrixType& A, int j, int size)
+    {
+        if (size==1)
+            return A[0][0]>1-1e-5;
+        return A[j][j]>1-1e-5;
+    }
+
+
+    template<class TransferOperatorType, class GridType, class field_type>
+    static void setup(TransferOperatorType& matrix, const GridType& grid, int cL, int fL)
+    {
+        typedef typename TransferOperatorType::block_type TransferMatrixBlock;
+
+        if (fL != cL+1)
+            DUNE_THROW(Dune::Exception, "The two function spaces don't belong to consecutive levels!");
+
+        const int dim = GridType::dimension;
+
+        const typename GridType::Traits::LevelIndexSet& coarseIndexSet = grid.levelIndexSet(cL);
+        const typename GridType::Traits::LevelIndexSet& fineIndexSet   = grid.levelIndexSet(fL);
+
+        int rows = grid.size(fL, dim);
+        int cols = grid.size(cL, dim);
+
+        // Make identity matrix
+        TransferMatrixBlock identity(0);
+        for (int i=0; i<identity.N(); i++)
+            identity[i][i] = 1;
+
+        matrix.setSize(rows,cols);
+        matrix.setBuildMode(TransferOperatorType::random);
+
+        typedef typename GridType::template Codim<0>::LevelIterator ElementIterator;
+        typedef typename GridType::ctype ctype;
+
+        ElementIterator cIt    = grid.template lbegin<0>(cL);
+        ElementIterator cEndIt = grid.template lend<0>(cL);
+
+
+        // ///////////////////////////////////////////
+        // Determine the indices present in the matrix
+        // /////////////////////////////////////////////////
+        Dune::MatrixIndexSet indices(rows, cols);
+
+        for (; cIt != cEndIt; ++cIt) {
+
+            typedef typename GridType::template Codim<0>::Entity EntityType;
+            typedef typename EntityType::HierarchicIterator HierarchicIterator;
+
+            const Dune::LagrangeShapeFunctionSet<ctype, field_type, dim>& coarseBaseSet 
+                = Dune::LagrangeShapeFunctions<ctype, field_type, dim>::general(cIt->type(), 1);
+            const int numCoarseBaseFct = coarseBaseSet.size();
+
+            HierarchicIterator fIt    = cIt->hbegin(fL);
+            HierarchicIterator fEndIt = cIt->hend(fL);
+
+            for (; fIt != fEndIt; ++fIt) {
+
+                if (fIt->level()==cIt->level())
+                    continue;
+
+                const typename EntityType::Geometry& fGeometryInFather = fIt->geometryInFather();
+
+                const Dune::LagrangeShapeFunctionSet<ctype, field_type, dim>& fineBaseSet 
+                    = Dune::LagrangeShapeFunctions<ctype, field_type, dim>::general(fIt->type(), 1);
+                const int numFineBaseFct = fineBaseSet.size();
+
+                for (int i=0; i<numCoarseBaseFct; i++) {
+
+                    int globalCoarse = coarseIndexSet.template subIndex<dim>(*cIt,i);
+
+                    for (int j=0; j<numFineBaseFct; j++) {
+
+                        int globalFine = fineIndexSet.template subIndex<dim>(*fIt,j);
+
+                        Dune::FieldVector<ctype, dim> local = fGeometryInFather.global(fineBaseSet[j].position());
+
+                        // Evaluate coarse grid base function
+                        field_type value = coarseBaseSet[i].evaluateFunction(0, local);
+
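+                        // Testing for nonzero against a small threshold is
+                        // safe here; see the comment in the assembly loop below.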
+                        if (value > 0.001) 
+                            indices.add(globalFine, globalCoarse);
+
+                    }
+
+
+                }
+
+            }
+
+        }
+
+        indices.exportIdx(matrix);
+
+        // /////////////////////////////////////////////
+        // Compute the matrix
+        // /////////////////////////////////////////////
+        cIt    = grid.template lbegin<0>(cL);
+        for (; cIt != cEndIt; ++cIt) {
+
+            const Dune::LagrangeShapeFunctionSet<ctype, field_type, dim>& coarseBaseSet 
+                = Dune::LagrangeShapeFunctions<ctype, field_type, dim>::general(cIt->type(), 1);
+            const int numCoarseBaseFct = coarseBaseSet.size();
+
+            typedef typename GridType::template Codim<0>::Entity EntityType;
+            typedef typename EntityType::HierarchicIterator HierarchicIterator;
+
+            HierarchicIterator fIt    = cIt->hbegin(fL);
+            HierarchicIterator fEndIt = cIt->hend(fL);
+
+            for (; fIt != fEndIt; ++fIt) {
+
+                if (fIt->level()==cIt->level())
+                    continue;
+
+                const typename EntityType::Geometry& fGeometryInFather = fIt->geometryInFather();
+
+                const Dune::LagrangeShapeFunctionSet<ctype, field_type, dim>& fineBaseSet 
+                    = Dune::LagrangeShapeFunctions<ctype, field_type, dim>::general(fIt->type(), 1);
+                const int numFineBaseFct = fineBaseSet.size();
+
+                for (int i=0; i<numCoarseBaseFct; i++) {
+
+                    int globalCoarse = coarseIndexSet.template subIndex<dim>(*cIt,i);
+
+                    for (int j=0; j<numFineBaseFct; j++) {
+
+                        int globalFine = fineIndexSet.template subIndex<dim>(*fIt,j);
+
+                        // Evaluate coarse grid base function at the location of the fine grid dof
+
+                        // first determine local fine grid dof position
+                        Dune::FieldVector<ctype, dim> local = fGeometryInFather.global(fineBaseSet[j].position());
+
+                        // Evaluate coarse grid base function
+                        field_type value = coarseBaseSet[i].evaluateFunction(0, local);
+
+                        // The following conditional is a hack:  evaluating the coarse
+                        // grid base function will often return 0.  However, we don't
+                        // want explicit zero entries in our prolongation matrix.  Since
+                        // the whole code works for P1-elements only anyways, we know
+                        // that value can only be 0, 0.5, or 1.  Thus testing for nonzero
+                        // by testing > 0.001 is safe.
+                        if (value > 0.001) {
+                            TransferMatrixBlock matValue = identity;
+                            matValue *= value;
+                            matrix[globalFine][globalCoarse] = matValue;
+                        }
+
+                    }
+
+
+                }
+
+            }
+
+        }
+
+    }
+
+
+    //! Multiply the vector f from the right to the prolongation matrix
+    template<class TransferOperatorType, class DiscFuncType>
+    static void prolong(const TransferOperatorType& matrix, const DiscFuncType& f, DiscFuncType &t)
+    {
+        if (f.size() != matrix.M())
+            DUNE_THROW(Dune::Exception, "Number of entries in the coarse grid vector is not equal "
+                    << "to the number of columns of the interpolation matrix!");
+
+        t.resize(matrix.N());
+
+        typedef typename DiscFuncType::Iterator      Iterator;
+        typedef typename DiscFuncType::ConstIterator ConstIterator;
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for(int rowIdx=0; rowIdx<matrix.N(); rowIdx++)
+        {
+            const RowType& row = matrix[rowIdx];
+
+            t[rowIdx] = 0.0;
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+                umv(*cIt, f[cIt.index()], t[rowIdx]);
+        }
+    }
+
+
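+    /* The overloads below that take an additional virtualBlockSize parameter
+     * act on vectors storing virtualBlockSize consecutive entries per row or
+     * column of the transfer matrix.  A negative virtualBlockSize means:
+     * deduce the block size from the ratio of vector length and matrix size. */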
+    //! Multiply the vector f from the right to the prolongation matrix
+    template<class TransferOperatorType, class DiscFuncType>
+    static void prolong(const TransferOperatorType& matrix, const DiscFuncType& f, DiscFuncType &t, int virtualBlockSize)
+    {
+        if (virtualBlockSize<0)
+            virtualBlockSize = f.size()/matrix.M();
+
+        if (f.size() != matrix.M()*virtualBlockSize)
+            DUNE_THROW(Dune::Exception, "Number of entries in the coarse grid vector is not equal "
+                    << "to the number of columns of the interpolation matrix!");
+
+        t.resize(matrix.N()*virtualBlockSize);
+
+        typedef typename DiscFuncType::Iterator      Iterator;
+        typedef typename DiscFuncType::ConstIterator ConstIterator;
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for(int rowIdx=0; rowIdx<matrix.N(); rowIdx++)
+        {
+            const RowType& row = matrix[rowIdx];
+
+            for(int i=0; i<virtualBlockSize; ++i)
+                t[rowIdx*virtualBlockSize+i] = 0.0;
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+            {
+                for(int i=0; i<virtualBlockSize; ++i)
+                    umv(*cIt, f[cIt.index()*virtualBlockSize+i], t[rowIdx*virtualBlockSize+i]);
+            }
+        }
+    }
+
+
+    //! Multiply the vector f from the right to the transpose of the prolongation matrix
+    template<class TransferOperatorType, class DiscFuncType>
+    static void restrict(const TransferOperatorType& matrix, const DiscFuncType& f, DiscFuncType &t)
+    {
+        if (f.size() != matrix.N())
+            DUNE_THROW(Dune::Exception, "Fine grid vector has " << f.size() << " entries "
+                    << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        t.resize(matrix.M());
+        t = 0;
+
+        typedef typename DiscFuncType::Iterator      Iterator;
+        typedef typename DiscFuncType::ConstIterator ConstIterator;
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for (int rowIdx=0; rowIdx<matrix.N(); rowIdx++)
+        {
+            const RowType& row = matrix[rowIdx];
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+                umtv(*cIt, f[rowIdx], t[cIt.index()]);
+        }
+    }
+
+
+    //! Multiply the vector f from the right to the transpose of the prolongation matrix
+    template<class TransferOperatorType, class DiscFuncType>
+    static void restrict(const TransferOperatorType& matrix, const DiscFuncType& f, DiscFuncType &t, int virtualBlockSize)
+    {
+        if (virtualBlockSize<0)
+            virtualBlockSize = f.size()/matrix.N();
+
+        if (f.size() != matrix.N()*virtualBlockSize)
+            DUNE_THROW(Dune::Exception, "Fine grid vector has " << f.size() << " entries "
+                    << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        t.resize(matrix.M()*virtualBlockSize);
+        t = 0;
+
+        typedef typename DiscFuncType::Iterator      Iterator;
+        typedef typename DiscFuncType::ConstIterator ConstIterator;
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for (int rowIdx=0; rowIdx<matrix.N(); rowIdx++)
+        {
+            const RowType& row = matrix[rowIdx];
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+            {
+                for(int i=0; i<virtualBlockSize; ++i)
+                    umtv(*cIt, f[rowIdx*virtualBlockSize+i], t[cIt.index()*virtualBlockSize+i]);
+            }
+        }
+    }
+
+
+    //! Restrict a scalar bitfield from the fine onto the coarse grid
+    template<class TransferOperatorType>
+    static void restrictScalarBitField(const TransferOperatorType& matrix, const Dune::BitSetVector<1>& f, Dune::BitSetVector<1>& t)
+    {
+        if (f.size() != (unsigned int)matrix.N())
+            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
+                    << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        t.resize(matrix.M());
+        t.unsetAll();
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for (int rowIdx=0; rowIdx<matrix.N(); rowIdx++) {
+
+            if (!f[rowIdx][0])
+                continue;
+
+            const RowType& row = matrix[rowIdx];
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+                t[cIt.index()] = true;
+        }
+    }
+
+
+    //! Restrict a scalar bitfield from the fine onto the coarse grid
+    template<class TransferOperatorType>
+    static void restrictScalarBitField(const TransferOperatorType& matrix, const Dune::BitSetVector<1>& f, Dune::BitSetVector<1>& t, int virtualBlockSize)
+    {
+        if (virtualBlockSize<0)
+            virtualBlockSize = f.size()/matrix.N();
+
+        if (f.size() != (unsigned int)matrix.N()*virtualBlockSize)
+            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
+                    << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        t.resize(matrix.M()*virtualBlockSize);
+        t.unsetAll();
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for (int rowIdx=0; rowIdx<matrix.N(); rowIdx++) {
+
+            bool someBitSet = false;
+            for(int i=0; i<virtualBlockSize; ++i)
+            {
+                if (f[rowIdx*virtualBlockSize+i][0])
+                {
+                    someBitSet = true;
+                    break;
+                }
+            }
+            if (not(someBitSet))
+                continue;
+
+            const RowType& row = matrix[rowIdx];
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+            {
+                for(int i=0; i<virtualBlockSize; ++i)
+                {
+                    if (f[rowIdx*virtualBlockSize+i][0])
+                        t[cIt.index()*virtualBlockSize+i] = true;
+                }
+            }
+        }
+    }
+
+
+    //! Restrict a vector valued bitfield from the fine onto the coarse grid
+    template <class TransferOperatorType, class BitVectorType>
+    static void restrictBitField(const TransferOperatorType& matrix, const BitVectorType& f, BitVectorType& t)
+    {
+        if (f.size() != matrix.N())
+            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
+                    << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        t.resize(matrix.M());
+        t.unsetAll();
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for (int rowIdx=0; rowIdx<matrix.N(); rowIdx++) {
+            const typename BitVectorType::const_reference fineBits = f[rowIdx];
+
+            if (fineBits.none())
+                continue;
+
+            const RowType& row = matrix[rowIdx];
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+            {
+                for(int i=0; i<fineBits.size(); ++i)
+                    if (fineBits.test(i))
+                        t[cIt.index()][i] = true;
+            }
+
+        }
+
+    }
+
+
+    //! Restrict a vector valued bitfield from the fine onto the coarse grid
+    template <class TransferOperatorType, class BitVectorType>
+    static void restrictBitField(const TransferOperatorType& matrix, const BitVectorType& f, BitVectorType& t, int virtualBlockSize)
+    {
+        if (virtualBlockSize<0)
+            virtualBlockSize = f.size()/matrix.N();
+
+        if (f.size() != (unsigned int)matrix.N()*virtualBlockSize)
+            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
+                    << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        t.resize(matrix.M()*virtualBlockSize);
+        t.unsetAll();
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for (int rowIdx=0; rowIdx<matrix.N(); rowIdx++) {
+
+            bool someBitSet = false;
+            for(int i=0; i<virtualBlockSize; ++i)
+            {
+                if (f[rowIdx*virtualBlockSize+i].any())
+                {
+                    someBitSet = true;
+                    break;
+                }
+            }
+            if (not(someBitSet))
+                continue;
+
+            const RowType& row = matrix[rowIdx];
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+            {
+                for(int i=0; i<virtualBlockSize; ++i)
+                {
+                    const typename BitVectorType::const_reference fineBits = f[rowIdx*virtualBlockSize+i];
+                    for(int j=0; j<fineBits.size(); ++j)
+                        if (fineBits.test(j))
+                            t[cIt.index()*virtualBlockSize+i][j] = true;
+                }
+            }
+
+        }
+
+    }
+
+
+    //! Restrict a vector valued bitfield from the fine onto the coarse grid
+    template <class TransferOperatorType, class BitVectorType>
+    static void restrictBitFieldToFathers(const TransferOperatorType& matrix, const BitVectorType& f, BitVectorType& t)
+    {
+        if (f.size() != matrix.N())
+            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
+                    << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        t.resize(matrix.M());
+        t.unsetAll();
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for (int rowIdx=0; rowIdx<matrix.N(); rowIdx++) {
+            const typename BitVectorType::const_reference fineBits = f[rowIdx];
+
+            if (fineBits.none())
+                continue;
+
+            const RowType& row = matrix[rowIdx];
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+            {
+                for(int i=0; i<fineBits.size(); ++i)
+                    if (fineBits.test(i))
+                        if (diagonalIsOne(*cIt, i, fineBits.size()))
+                            t[cIt.index()][i] = true;
+            }
+        }
+    }
+
+
+    //! Restrict a vector valued bitfield from the fine onto the coarse grid
+    template <class TransferOperatorType, class BitVectorType>
+    static void restrictBitFieldToFathers(const TransferOperatorType& matrix, const BitVectorType& f, BitVectorType& t, int virtualBlockSize)
+    {
+        if (virtualBlockSize<0)
+            virtualBlockSize = f.size()/matrix.N();
+
+        if (f.size() != (unsigned int)matrix.N()*virtualBlockSize)
+            DUNE_THROW(Dune::Exception, "Fine grid bitfield has " << f.size() << " entries "
+                    << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        t.resize(matrix.M()*virtualBlockSize);
+        t.unsetAll();
+
+        typedef typename TransferOperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        for (int rowIdx=0; rowIdx<matrix.N(); rowIdx++) {
+
+            bool someBitSet = false;
+            for(int i=0; i<virtualBlockSize; ++i)
+            {
+                if (f[rowIdx*virtualBlockSize+i].any())
+                {
+                    someBitSet = true;
+                    break;
+                }
+            }
+            if (not(someBitSet))
+                continue;
+
+            const RowType& row = matrix[rowIdx];
+
+            ColumnIterator cIt    = row.begin();
+            ColumnIterator cEndIt = row.end();
+
+            for(; cIt!=cEndIt; ++cIt)
+            {
+                for(int i=0; i<virtualBlockSize; ++i)
+                {
+                    const typename BitVectorType::const_reference fineBits = f[rowIdx*virtualBlockSize+i];
+                    for(int j=0; j<fineBits.size(); ++j)
+                        if (fineBits.test(j))
+                            if (diagonalIsOne(*cIt, j, fineBits.size()))
+                                t[cIt.index()*virtualBlockSize+i][j] = true;
+                }
+            }
+        }
+    }
+
+
+
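+    //! Galerkin restriction: assemble coarseMat = P^T * fineMat * P,
+    //! where P is the given transfer matrix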
+    template<class TransferOperatorType, class OperatorType>
+    static void galerkinRestrict(const TransferOperatorType& matrix, const OperatorType& fineMat, OperatorType& coarseMat)
+    {
+        // ////////////////////////
+        // Nonsymmetric case
+        // ////////////////////////
+        typedef typename OperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        typedef typename TransferOperatorType::row_type TransferRowType;
+        typedef typename TransferRowType::ConstIterator TransferColumnIterator;
+
+        // Clear coarse matrix
+        coarseMat = 0;
+
+        // Loop over all rows of the stiffness matrix
+        for (int v=0; v<fineMat.N(); v++)
+        {
+            const RowType& row = fineMat[v];
+
+            // Loop over all columns of the stiffness matrix
+            ColumnIterator m    = row.begin();
+            ColumnIterator mEnd = row.end();
+
+            for (; m!=mEnd; ++m)
+            {
+                int w = m.index();
+
+                // Loop over all coarse grid vectors iv that have v in their support
+                TransferColumnIterator im    = matrix[v].begin();
+                TransferColumnIterator imEnd = matrix[v].end();
+                for (; im!=imEnd; ++im)
+                {
+                    int iv = im.index();
+
+                    // Loop over all coarse grid vectors jv that have w in their support
+                    TransferColumnIterator jm = matrix[w].begin();
+                    TransferColumnIterator jmEnd = matrix[w].end();
+
+                    for (; jm!=jmEnd; ++jm)
+                    {
+                        int jv = jm.index();
+
+                        typename OperatorType::block_type& cm = coarseMat[iv][jv];
+
+                        // Compute cm = im^T * m * jm
+                        if(TransferOperatorType::block_type::rows==1)
+                            StaticMatrix::axpy(cm, (*im)[0][0] * (*jm)[0][0], *m);
+                        else
+                            StaticMatrix::addTransformedMatrix(cm, *im, *m, *jm);
+                    }
+                }
+            }
+        }
+    }
+
+
+    template<class TransferOperatorType, class OperatorType>
+    static void galerkinRestrict(const TransferOperatorType& matrix, const OperatorType& fineMat, OperatorType& coarseMat, int virtualBlockSize)
+    {
+        if (virtualBlockSize<0)
+            virtualBlockSize = fineMat.N()/matrix.N();
+
+        // ////////////////////////
+        // Nonsymmetric case
+        // ////////////////////////
+        typedef typename OperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        typedef typename TransferOperatorType::row_type TransferRowType;
+        typedef typename TransferRowType::ConstIterator TransferColumnIterator;
+
+        // Clear coarse matrix
+        coarseMat = 0;
+
+        // Loop over all rows of the stiffness matrix
+        for (int v=0; v<fineMat.N(); v++)
+        {
+            int v_block = v/virtualBlockSize;
+            int v_inblock = v%virtualBlockSize;
+
+            const RowType& row = fineMat[v];
+
+            // Loop over all columns of the stiffness matrix
+            ColumnIterator m    = row.begin();
+            ColumnIterator mEnd = row.end();
+
+            for (; m!=mEnd; ++m)
+            {
+                int w = m.index();
+                int w_block = w/virtualBlockSize;
+                int w_inblock = w%virtualBlockSize;
+
+                // Loop over all coarse grid vectors iv that have v in their support
+                TransferColumnIterator im    = matrix[v_block].begin();
+                TransferColumnIterator imEnd = matrix[v_block].end();
+                for (; im!=imEnd; ++im)
+                {
+                    int iv = im.index()*virtualBlockSize+v_inblock;
+
+                    // Loop over all coarse grid vectors jv that have w in their support
+                    TransferColumnIterator jm = matrix[w_block].begin();
+                    TransferColumnIterator jmEnd = matrix[w_block].end();
+
+                    for (; jm!=jmEnd; ++jm)
+                    {
+                        int jv = jm.index()*virtualBlockSize+w_inblock;
+
+                        typename OperatorType::block_type& cm = coarseMat[iv][jv];
+
+                        // Compute cm = im^T * m * jm
+                        if(TransferOperatorType::block_type::rows==1)
+                            StaticMatrix::axpy(cm, (*im)[0][0] * (*jm)[0][0], *m);
+                        else
+                            StaticMatrix::addTransformedMatrix(cm, *im, *m, *jm);
+                    }
+                }
+            }
+        }
+    }
+
+
+    //! Set occupation of the Galerkin restricted coarse stiffness matrix
+    template<class TransferOperatorType, class OperatorType>
+    static void galerkinRestrictSetOccupation(const TransferOperatorType& matrix, const OperatorType& fineMat, OperatorType& coarseMat)
+    {
+        if (fineMat.N() != matrix.N())
+            DUNE_THROW(Dune::Exception, "Fine grid matrix has " << fineMat.N() << " rows "
+                << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        // ////////////////////////
+        // Nonsymmetric case
+        // ////////////////////////
+        typedef typename OperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        typedef typename TransferOperatorType::row_type TransferRowType;
+        typedef typename TransferRowType::ConstIterator TransferColumnIterator;
+
+        // Create index set
+        Dune::MatrixIndexSet indices(matrix.M(), matrix.M());
+
+        // Loop over all rows of the fine matrix
+        for (int v=0; v<fineMat.N(); v++)
+        {
+            const RowType& row = fineMat[v];
+
+            // Loop over all columns of the fine matrix
+            ColumnIterator m    = row.begin();
+            ColumnIterator mEnd = row.end();
+
+            for (; m!=mEnd; ++m)
+            {
+                int w = m.index();
+
+                // Loop over all coarse grid vectors iv that have v in their support
+                TransferColumnIterator im    = matrix[v].begin();
+                TransferColumnIterator imEnd = matrix[v].end();
+                for (; im!=imEnd; ++im)
+                {
+                    int iv = im.index();
+
+                    // Loop over all coarse grid vectors jv that have w in their support
+                    TransferColumnIterator jm    = matrix[w].begin();
+                    TransferColumnIterator jmEnd = matrix[w].end();
+
+                    for (; jm!=jmEnd; ++jm)
+                        indices.add(iv, jm.index());
+                }
+            }
+        }
+        indices.exportIdx(coarseMat);
+    }
+
+
+    //! Set occupation of the Galerkin restricted coarse stiffness matrix
+    template<class TransferOperatorType, class OperatorType>
+    static void galerkinRestrictSetOccupation(const TransferOperatorType& matrix, const OperatorType& fineMat, OperatorType& coarseMat, int virtualBlockSize)
+    {
+        if (fineMat.N() != matrix.N())
+            DUNE_THROW(Dune::Exception, "Fine grid matrix has " << fineMat.N() << " rows "
+                << "but the interpolation matrix has " << matrix.N() << " rows!");
+
+        if (virtualBlockSize<0)
+            virtualBlockSize = fineMat.N()/matrix.N();
+
+        // ////////////////////////
+        // Nonsymmetric case
+        // ////////////////////////
+        typedef typename OperatorType::row_type RowType;
+        typedef typename RowType::ConstIterator ColumnIterator;
+
+        typedef typename TransferOperatorType::row_type TransferRowType;
+        typedef typename TransferRowType::ConstIterator TransferColumnIterator;
+
+        // Create index set
+        Dune::MatrixIndexSet indices(matrix.M()*virtualBlockSize, matrix.M()*virtualBlockSize);
+
+        // Loop over all rows of the fine matrix
+        for (int v=0; v<fineMat.N(); v++)
+        {
+            int v_block = v/virtualBlockSize;
+            int v_inblock = v%virtualBlockSize;
+
+            const RowType& row = fineMat[v];
+
+            // Loop over all columns of the fine matrix
+            ColumnIterator m    = row.begin();
+            ColumnIterator mEnd = row.end();
+
+            for (; m!=mEnd; ++m)
+            {
+                int w = m.index();
+                int w_block = w/virtualBlockSize;
+                int w_inblock = w%virtualBlockSize;
+
+                // Loop over all coarse grid vectors iv that have v in their support
+                TransferColumnIterator im    = matrix[v_block].begin();
+                TransferColumnIterator imEnd = matrix[v_block].end();
+                for (; im!=imEnd; ++im)
+                {
+                    int iv = im.index()*virtualBlockSize+v_inblock;
+
+                    // Loop over all coarse grid vectors jv that have w in their support
+                    TransferColumnIterator jm    = matrix[w_block].begin();
+                    TransferColumnIterator jmEnd = matrix[w_block].end();
+
+                    for (; jm!=jmEnd; ++jm)
+                    {
+                        int jv = jm.index()*virtualBlockSize+w_inblock;
+                        indices.add(iv, jv);
+                    }
+                }
+            }
+        }
+        indices.exportIdx(coarseMat);
+    }
+
+};
+
+#endif
diff --git a/dune-solvers/transferoperators/multigridtransfer.hh b/dune-solvers/transferoperators/multigridtransfer.hh
new file mode 100644
index 00000000..455727a8
--- /dev/null
+++ b/dune-solvers/transferoperators/multigridtransfer.hh
@@ -0,0 +1,73 @@
+#ifndef MULTIGRID_TRANSFER_HH
+#define MULTIGRID_TRANSFER_HH
+
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/bitsetvector.hh>
+
+
+/** \brief Restriction and prolongation operator for standard multigrid
+ *
+ * This class is the base class for prolongation and restriction
+ * operators for standard multigrid solvers.  Restriction and prolongation
+ * of block vectors are provided.
+ *
+ * \todo Currently only works for first-order Lagrangian elements!
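+ *
+ * A multigrid solver typically keeps one transfer operator per pair of
+ * consecutive levels, e.g. (a sketch with hypothetical names):
+ * \code
+ *   std::vector<MultigridTransfer<VectorType>*> transfer(numLevels-1);
+ *   // fill with, e.g., CompressedMultigridTransfer objects, then:
+ *   transfer[k]->restrict(fineResidual, coarseResidual);
+ * \endcode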
+ */
+template<
+    class VectorType,
+    class BitVectorType = Dune::BitSetVector<VectorType::block_type::dimension>,
+    class MatrixType = Dune::BCRSMatrix< Dune::FieldMatrix<
+        typename VectorType::field_type, VectorType::block_type::size, VectorType::block_type::size> > >
+class MultigridTransfer {
+
+public:
+
+    typedef MatrixType OperatorType;
+
+    virtual ~MultigridTransfer() {}
+
+
+    /** \brief Restrict a function from the fine onto the coarse grid
+     */
+    virtual void restrict(const VectorType& f, VectorType &t) const = 0;
+
+
+    /** \brief Restrict a bitfield from the fine onto the coarse grid
+     */
+    virtual void restrictScalarBitField(const Dune::BitSetVector<1>& f, Dune::BitSetVector<1>& t) const = 0;
+
+
+    /** \brief Restrict a vector valued bitfield from the fine onto the coarse grid
+     */
+    virtual void restrict(const BitVectorType& f, BitVectorType& t) const = 0;
+
+
+    /** \brief Restrict a vector valued bitfield from the fine onto the coarse grid
+     * Fine bits only influence their father bits
+     */
+    virtual void restrictToFathers(const BitVectorType& f, BitVectorType& t) const = 0;
+
+
+    /** \brief Prolong a function from the coarse onto the fine grid
+     */
+    virtual void prolong(const VectorType& f, VectorType &t) const = 0;
+
+
+    /** \brief Galerkin assemble a coarse stiffness matrix
+     */
+    virtual void galerkinRestrict(const MatrixType& fineMat, MatrixType& coarseMat) const = 0;
+
+
+    /** \brief Set occupation of the Galerkin restricted coarse stiffness matrix
+    *
+    * Call this method before galerkinRestrict to ensure that all non-zero
+    * entries are present.
+    * \param fineMat The fine level matrix
+    * \param coarseMat The coarse level matrix
+    */
+    virtual void galerkinRestrictSetOccupation(const MatrixType& fineMat, MatrixType& coarseMat) const = 0;
+
+};
+
+#endif
diff --git a/dune-solvers/transferoperators/truncatedcompressedmgtransfer.cc b/dune-solvers/transferoperators/truncatedcompressedmgtransfer.cc
new file mode 100644
index 00000000..f4e62037
--- /dev/null
+++ b/dune-solvers/transferoperators/truncatedcompressedmgtransfer.cc
@@ -0,0 +1,197 @@
+
+
+template<class DiscFuncType, class BitVectorType, class OperatorType>
+void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>::prolong(const DiscFuncType& f, DiscFuncType& t,
+                                              const BitVectorType& critical) const
+{
+    if (f.size() != this->matrix_->M())
+        DUNE_THROW(Dune::Exception, "Number of entries in the coarse grid vector is not equal "
+                   << "to the number of columns of the prolongation matrix!");
+
+    if (((unsigned int)this->matrix_->N()) != critical.size())
+        DUNE_THROW(Dune::Exception, "Number of entries in the critical is not equal "
+                   << "to the number of rows of the prolongation matrix!");
+
+    t.resize(this->matrix_->N());
+
+    typedef typename DiscFuncType::Iterator      Iterator;
+    typedef typename DiscFuncType::ConstIterator ConstIterator;
+
+    typedef typename TransferOperatorType::row_type RowType;
+    typedef typename RowType::ConstIterator ColumnIterator;
+    
+    Iterator tIt      = t.begin();
+
+    for(int rowIdx=0; rowIdx<this->matrix_->N(); rowIdx++) {
+
+        const RowType& row = (*this->matrix_)[rowIdx];
+
+        *tIt = 0.0;
+        ColumnIterator cIt    = row.begin();
+        ColumnIterator cEndIt = row.end();
+
+        for(; cIt!=cEndIt; ++cIt) {
+
+            // The following lines are a matrix-vector loop with a multiple of the
+            // identity matrix, but rows belonging to critical dofs are left out
+            for (int i=0; i<blocksize; i++) {
+
+                if (!critical[rowIdx][i])
+                    (*tIt)[i] += (*cIt)[0][0] * f[cIt.index()][i];
+
+            }
+
+        }
+        
+        ++tIt;
+    }
+
+}
+
+template<class DiscFuncType, class BitVectorType, class OperatorType>
+void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>::restrict(const DiscFuncType& f, DiscFuncType& t,
+                                              const BitVectorType& critical) const
+{
+    if (f.size() != this->matrix_->N())
+        DUNE_THROW(Dune::Exception, "Fine grid vector has " << f.size() << " entries "
+                   << "but the interpolation matrix has " << this->matrix_->N() << " rows!");
+
+
+    if (((unsigned int)this->matrix_->N()) != critical.size())
+        DUNE_THROW(Dune::Exception, "Number of entries in the critical is not equal "
+                   << "to the number of rows of the prolongation matrix!");
+
+    t.resize(this->matrix_->M());
+    t = 0;
+
+    typedef typename DiscFuncType::Iterator      Iterator;
+    typedef typename DiscFuncType::ConstIterator ConstIterator;
+
+    typedef typename TransferOperatorType::row_type RowType;
+    typedef typename RowType::ConstIterator ColumnIterator;
+
+    
+    for (int rowIdx=0; rowIdx<this->matrix_->N(); rowIdx++) {
+        
+        const RowType& row = (*this->matrix_)[rowIdx];
+
+        ColumnIterator cIt    = row.begin();
+        ColumnIterator cEndIt = row.end();
+        
+        for(; cIt!=cEndIt; ++cIt) {
+            
+            // The following lines are a matrix-vector loop, but rows belonging
+            // to critical dofs are left out
+            typename DiscFuncType::block_type& tEntry = t[cIt.index()];
+            for (int i=0; i<blocksize; i++) {
+                
+                if (!critical[rowIdx][i])
+                    tEntry[i] += (*cIt)[0][0] * f[rowIdx][i];
+
+            }
+
+        }
+
+    }
+ 
+}
+
+
+template<class DiscFuncType, class BitVectorType, class OperatorType>
+void TruncatedCompressedMGTransfer<DiscFuncType, BitVectorType, OperatorType>::
+galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat,
+                 const BitVectorType& critical) const
+{
+
+    if (this->recompute_ != NULL && this->recompute_->size() != (unsigned int)this->matrix_->M())
+        DUNE_THROW(Dune::Exception, "The size of the recompute_-bitfield doesn't match the "
+                   << "size of the coarse grid space!");
+
+  // ////////////////////////
+  // Nonsymmetric case
+  // ////////////////////////
+  typedef typename OperatorType::row_type RowType;
+  typedef typename RowType::Iterator ColumnIterator;
+  typedef typename RowType::ConstIterator ConstColumnIterator;
+
+  typedef typename TransferOperatorType::row_type::ConstIterator TransferConstColumnIterator;
+
+  // Clear coarse matrix
+  if (this->recompute_ == NULL)
+      coarseMat = 0;
+  else {
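+      // recompute_ is set: clear only those entries that couple to a coarse
+      // dof flagged for recomputation; all other entries are kept as they are.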
+      for (int i=0; i<coarseMat.N(); i++) {
+
+          RowType& row = coarseMat[i];
+          
+          // Loop over all columns of the stiffness matrix
+          ColumnIterator m    = row.begin();
+          ColumnIterator mEnd = row.end();
+          
+          for (; m!=mEnd; ++m) {
+              
+              if ((*this->recompute_)[i][0] || (*this->recompute_)[m.index()][0])
+                  *m = 0;
+
+          }
+
+      }
+
+  }
+
+  // Loop over all rows of the stiffness matrix
+  for (int v=0; v<fineMat.N(); v++) {
+      
+      const RowType& row = fineMat[v];
+
+      // Loop over all columns of the stiffness matrix
+      ConstColumnIterator m    = row.begin();
+      ConstColumnIterator mEnd = row.end();
+        
+      for (; m!=mEnd; ++m) {
+
+          int w = m.index();
+
+          // Loop over all coarse grid vectors iv that have v in their support
+          TransferConstColumnIterator im    = (*this->matrix_)[v].begin();
+          TransferConstColumnIterator imEnd = (*this->matrix_)[v].end();
+          for (; im!=imEnd; ++im) {
+
+              int iv = im.index();
+
+              // Loop over all coarse grid vectors jv that have w in their support
+              TransferConstColumnIterator jm    = (*this->matrix_)[w].begin();
+              TransferConstColumnIterator jmEnd = (*this->matrix_)[w].end();
+              
+              for (; jm!=jmEnd; ++jm) {
+                  
+                  int jv = jm.index();
+
+                  if (this->recompute_ && !((*this->recompute_)[iv][0]) && !((*this->recompute_)[jv][0]))
+                      continue;
+                  
+                  typename OperatorType::block_type& cm = coarseMat[iv][jv];
+                  
+                  // Compute im * m * jm, but omitting the critical entries
+                  for (int i=0; i<blocksize; i++) {
+                      
+                      for (int j=0; j<blocksize; j++) {
+                          
+                          // Truncated Multigrid:  Omit coupling if at least
+                          // one of the two vectors is critical
+                          if (!critical[v][i] && !critical[w][j])
+                              cm[i][j] += (*im)[0][0] * (*m)[i][j] * (*jm)[0][0];
+                          
+                      }
+                      
+                  }
+
+              }
+          }
+      }
+  }
+
+}
diff --git a/dune-solvers/transferoperators/truncatedcompressedmgtransfer.hh b/dune-solvers/transferoperators/truncatedcompressedmgtransfer.hh
new file mode 100644
index 00000000..f7b4bd82
--- /dev/null
+++ b/dune-solvers/transferoperators/truncatedcompressedmgtransfer.hh
@@ -0,0 +1,82 @@
+#ifndef TRUNCATED_COMPRESSED_MG_TRANSFER_HH
+#define TRUNCATED_COMPRESSED_MG_TRANSFER_HH
+
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/bitsetvector.hh>
+
+#include "truncatedmgtransfer.hh"
+#include "compressedmultigridtransfer.hh"
+
+
+/** \brief Restriction and prolongation operator for truncated multigrid
+ *
+ * This class provides prolongation and restriction operators for truncated
+ * multigrid.  That means that you can explicitly switch off certain
+ * fine grid degrees-of-freedom.  This often leads to better coarse
+ * grid corrections when treating obstacle problems.  For an introduction
+ * to the theory of truncated multigrid see 'Adaptive Monotone Multigrid
+ * Methods for Nonlinear Variational Problems' by R. Kornhuber.
+ *
+ * Currently only works for first-order Lagrangian elements!
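+ *
+ * A sketch of the intended use (all names hypothetical):
+ * \code
+ *   TruncatedCompressedMGTransfer<VectorType> transfer;
+ *   transfer.setup(grid, k, k+1);
+ *   Dune::BitSetVector<blocksize> critical(fineSize, false);
+ *   // mark the critical (e.g. active obstacle) dofs, then:
+ *   transfer.restrict(fineR, coarseR, critical);
+ *   transfer.galerkinRestrict(fineMat, coarseMat, critical);
+ * \endcode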
+ */
+
+template<
+    class VectorType,
+    class BitVectorType = Dune::BitSetVector<VectorType::block_type::dimension>,
+    class MatrixType = Dune::BCRSMatrix< Dune::FieldMatrix<
+        typename VectorType::field_type, VectorType::block_type::size, VectorType::block_type::size> > >
+class TruncatedCompressedMGTransfer :
+    public CompressedMultigridTransfer<VectorType, BitVectorType, MatrixType>,
+    public TruncatedMGTransfer<VectorType, BitVectorType, MatrixType>
+{
+
+    enum {blocksize = VectorType::block_type::size};
+
+    typedef typename VectorType::field_type field_type;
+
+public:
+
+    typedef typename CompressedMultigridTransfer<VectorType, BitVectorType, MatrixType>::TransferOperatorType TransferOperatorType;
+    
+
+    /** \brief Default constructor */
+    TruncatedCompressedMGTransfer()
+    {}
+
+    /** \brief Restrict the fine grid vector f and store the result in the coarse grid vector t
+     *
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    void restrict(const VectorType& f, VectorType &t, const BitVectorType& critical) const;
+
+    /** \brief The untruncated restriction of the base class */
+    using CompressedMultigridTransfer< VectorType, BitVectorType, MatrixType >::restrict;
+
+    /** \brief Prolong the coarse grid vector f and store the result in the fine grid vector t
+     * 
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    void prolong(const VectorType& f, VectorType &t, const BitVectorType& critical) const;
+
+    /** \brief The untruncated prolongation of the base class */
+    using CompressedMultigridTransfer< VectorType, BitVectorType, MatrixType >::prolong;
+
+    /** \brief Galerkin assemble a coarse stiffness matrix
+     *
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    void galerkinRestrict(const MatrixType& fineMat, MatrixType& coarseMat, const BitVectorType& critical) const;
+
+    /** \brief The untruncated Galerkin restriction of the base class */
+    using CompressedMultigridTransfer< VectorType, BitVectorType, MatrixType >::galerkinRestrict;
+
+};
+
+
+#include "truncatedcompressedmgtransfer.cc"
+
+#endif
diff --git a/dune-solvers/transferoperators/truncateddensemgtransfer.cc b/dune-solvers/transferoperators/truncateddensemgtransfer.cc
new file mode 100644
index 00000000..263d1b79
--- /dev/null
+++ b/dune-solvers/transferoperators/truncateddensemgtransfer.cc
@@ -0,0 +1,218 @@
+
+
+template<class DiscFuncType, class BitVectorType, class OperatorType>
+void TruncatedDenseMGTransfer<DiscFuncType, BitVectorType, OperatorType>::prolong(const DiscFuncType& f, DiscFuncType& t,
+                                              const BitVectorType& critical) const
+{
+    if (f.size() != this->matrix_.M())
+        DUNE_THROW(Dune::Exception, "Number of entries in the coarse grid vector is not equal "
+                   << "to the number of columns of the prolongation matrix!");
+
+    if (((unsigned int)this->matrix_.N()) != critical.size())
+        DUNE_THROW(Dune::Exception, "Number of entries in the critical bitfield is not equal "
+                   << "to the number of rows of the prolongation matrix!");
+
+    t.resize(this->matrix_.N());
+
+    typedef typename DiscFuncType::Iterator Iterator;
+
+    typedef typename OperatorType::row_type RowType;
+    typedef typename RowType::ConstIterator ColumnIterator;
+
+    Iterator tIt = t.begin();
+
+    for (size_t rowIdx=0; rowIdx<this->matrix_.N(); rowIdx++) {
+
+        const RowType& row = this->matrix_[rowIdx];
+
+        *tIt = 0.0;
+        ColumnIterator cIt    = row.begin();
+        ColumnIterator cEndIt = row.end();
+
+        for(; cIt!=cEndIt; ++cIt) {
+
+            // Matrix-vector multiplication, but rows belonging
+            // to critical dofs are skipped
+            for (int i=0; i<blocksize; i++) {
+
+                if (critical[rowIdx][i])
+                    continue;
+
+                for (int j=0; j<blocksize; j++)
+                    (*tIt)[i] += (*cIt)[i][j] * f[cIt.index()][j];
+
+            }
+
+        }
+        
+        ++tIt;
+    }
+
+}
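+
+// A formula sketch of the truncated prolongation above, with P = matrix_:
+//
+//     t[p][i] = sum_{q,j} P[p,q][i][j] * f[q][j]   if component (p,i) is not critical
+//     t[p][i] = 0                                  otherwise
+//
+// i.e. matrix rows belonging to critical fine grid components act as zero rows.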
+
+template<class DiscFuncType, class BitVectorType, class OperatorType>
+void TruncatedDenseMGTransfer<DiscFuncType, BitVectorType, OperatorType>::restrict(const DiscFuncType& f, DiscFuncType& t,
+                                              const BitVectorType& critical) const
+{
+    if (f.size() != this->matrix_.N())
+        DUNE_THROW(Dune::Exception, "Fine grid vector has " << f.size() << " entries "
+                   << "but the interpolation matrix has " << this->matrix_.N() << " rows!");
+
+    if (((unsigned int)this->matrix_.N()) != critical.size())
+        DUNE_THROW(Dune::Exception, "Number of entries in the critical bitfield is not equal "
+                   << "to the number of rows of the prolongation matrix!");
+
+    t.resize(this->matrix_.M());
+    t = 0;
+
+    typedef typename OperatorType::row_type RowType;
+    typedef typename RowType::ConstIterator ColumnIterator;
+
+    for (size_t rowIdx=0; rowIdx<this->matrix_.N(); rowIdx++) {
+        
+        const RowType& row = this->matrix_[rowIdx];
+
+        ColumnIterator cIt    = row.begin();
+        ColumnIterator cEndIt = row.end();
+        
+        for(; cIt!=cEndIt; ++cIt) {
+            
+            // Transposed matrix-vector multiplication, but contributions
+            // of critical dofs are skipped
+            typename DiscFuncType::block_type& tEntry = t[cIt.index()];
+            for (int j=0; j<blocksize; j++) {
+
+                if (critical[rowIdx][j])
+                    continue;
+
+                for (int i=0; i<blocksize; i++)
+                    tEntry[i] += (*cIt)[j][i] * f[rowIdx][j];
+
+            }
+
+        }
+
+    }
+ 
+}
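+
+// The truncated restriction above applies the transpose of the same operator:
+// contributions of critical fine grid components (rowIdx, j) are dropped
+// before they are summed into the coarse vector, i.e. t = P^T f with the
+// critical rows of P zeroed out.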
+
+
+template<class DiscFuncType, class BitVectorType, class OperatorType>
+void TruncatedDenseMGTransfer<DiscFuncType, BitVectorType, OperatorType>::
+galerkinRestrict(const OperatorType& fineMat, OperatorType& coarseMat,
+                 const BitVectorType& critical) const
+{
+
+    if (this->recompute_ != NULL && this->recompute_->size() != (unsigned int)this->matrix_.M())
+        DUNE_THROW(Dune::Exception, "The size of the recompute_-bitfield doesn't match the "
+                   << "size of the coarse grid space!");
+
+  // ///////////////////////////////////////////////////
+  //   Compute the Galerkin product  P^T A P
+  //   (the matrix is not assumed to be symmetric)
+  // ///////////////////////////////////////////////////
+  typedef typename OperatorType::row_type RowType;
+  typedef typename RowType::Iterator ColumnIterator;
+  typedef typename RowType::ConstIterator ConstColumnIterator;
+
+  // Clear coarse matrix
+  if (this->recompute_ == NULL)
+      coarseMat = 0;
+  else {
+      for (size_t i=0; i<coarseMat.N(); i++) {
+
+          RowType& row = coarseMat[i];
+          
+          // Loop over all columns of the stiffness matrix
+          ColumnIterator m    = row.begin();
+          ColumnIterator mEnd = row.end();
+          
+          for (; m!=mEnd; ++m) {
+              
+              if ((*this->recompute_)[i][0] || (*this->recompute_)[m.index()][0])
+                  *m = 0;
+
+          }
+
+      }
+
+  }
+
+  // Loop over all rows of the stiffness matrix
+  for (size_t v=0; v<fineMat.N(); v++) {
+      
+      const RowType& row = fineMat[v];
+
+      // Loop over all columns of the stiffness matrix
+      ConstColumnIterator m    = row.begin();
+      ConstColumnIterator mEnd = row.end();
+        
+      for (; m!=mEnd; ++m) {
+
+          size_t w = m.index();
+
+          // Loop over all coarse grid vectors iv that have v in their support
+          ConstColumnIterator im    = this->matrix_[v].begin();
+          ConstColumnIterator imEnd = this->matrix_[v].end();
+          for (; im!=imEnd; ++im) {
+
+              size_t iv = im.index();
+
+              // Loop over all coarse grid vectors jv that have w in their support
+              ConstColumnIterator jm = this->matrix_[w].begin();
+              ConstColumnIterator jmEnd = this->matrix_[w].end();
+              
+              for (; jm!=jmEnd; ++jm) {
+                  
+                  size_t jv = jm.index();
+
+                  if (this->recompute_ && !((*this->recompute_)[iv][0]) && !((*this->recompute_)[jv][0]))
+                      continue;
+                  
+                  typename OperatorType::block_type& cm = coarseMat[iv][jv];
+                  
+                  // Compute im * m * jm, but omitting the critical entries
+                  for (int i=0; i<blocksize; i++) {
+                      
+                      for (int j=0; j<blocksize; j++) {
+                          
+                          double sum = 0.0;
+                          
+                          for (int k=0; k<blocksize; k++) {
+                              
+                              for (int l=0; l<blocksize; l++) {
+                                  // Truncated Multigrid:  Omit coupling if at least
+                                  // one of the two vectors is critical
+                                  if (!critical[v][k] && !critical[w][l]) {
+                                      sum += (*im)[k][i] * (*m)[k][l] * (*jm)[l][j];
+                                      
+                                  } 
+                                  
+                              }
+                              
+                          }
+                          
+                          cm[i][j] += sum;
+                          
+                      }
+                      
+                  }
+
+              }
+          }
+      }
+  }
+
+}
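+
+// A formula sketch of the truncated Galerkin product above, with P = matrix_
+// and A = fineMat:
+//
+//     coarseMat[iv][jv][i][j] += sum_{v,w,k,l} P[v,iv][k][i] * A[v,w][k][l] * P[w,jv][l][j]
+//
+// where terms with a critical component (v,k) or (w,l) are omitted and, if
+// recompute_ is set, only entries (iv,jv) involving a marked coarse dof are
+// updated.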
diff --git a/dune-solvers/transferoperators/truncateddensemgtransfer.hh b/dune-solvers/transferoperators/truncateddensemgtransfer.hh
new file mode 100644
index 00000000..46ae1c80
--- /dev/null
+++ b/dune-solvers/transferoperators/truncateddensemgtransfer.hh
@@ -0,0 +1,82 @@
+#ifndef TRUNCATED_DENSE_MG_TRANSFER_HH
+#define TRUNCATED_DENSE_MG_TRANSFER_HH
+
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/bitsetvector.hh>
+
+#include "densemultigridtransfer.hh"
+#include "truncatedmgtransfer.hh"
+
+/** \brief Restriction and prolongation operator for truncated multigrid
+ *
+ * This class provides prolongation and restriction operators for truncated
+ * multigrid.  That means that certain fine grid degrees of freedom can be
+ * explicitly switched off.  This often leads to better coarse
+ * grid corrections when treating obstacle problems.  For an introduction
+ * to the theory of truncated multigrid see 'Adaptive Monotone Multigrid
+ * Methods for Nonlinear Variational Problems' by R. Kornhuber.
+ *
+ * Currently only works for first-order Lagrangian elements!
+ */
+
+template<
+    class VectorType,
+    class BitVectorType = Dune::BitSetVector<VectorType::block_type::dimension>,
+    class MatrixType = Dune::BCRSMatrix< typename Dune::FieldMatrix<
+        typename VectorType::field_type, VectorType::block_type::size, VectorType::block_type::size> > >
+class TruncatedDenseMGTransfer :
+    public DenseMultigridTransfer<VectorType, BitVectorType, MatrixType>,
+    public TruncatedMGTransfer<VectorType, BitVectorType, MatrixType>
+{
+
+    enum {blocksize = VectorType::block_type::size};
+
+    typedef typename VectorType::field_type field_type;
+
+public:
+
+    typedef MatrixType OperatorType;
+
+    typedef Dune::BCRSMatrix< Dune::FieldMatrix< field_type, blocksize, blocksize > > TransferOperatorType;
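+
+    /* Note: the transfer operator of this dense variant keeps full
+     * blocksize x blocksize matrix blocks, whereas the compressed variant
+     * stores scalar entries, i.e. for blocksize 2 and double entries:
+     *
+     *   dense:      Dune::BCRSMatrix< Dune::FieldMatrix<double,2,2> >
+     *   compressed: Dune::BCRSMatrix< Dune::FieldMatrix<double,1,1> >
+     */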
+
+    /** \brief Default constructor */
+    TruncatedDenseMGTransfer()
+    {}
+
+    /** \brief Restrict a fine grid vector f and store the result in the coarse grid vector t
+     *
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    void restrict(const VectorType& f, VectorType &t, const BitVectorType& critical) const;
+
+    /** \brief Make the untruncated restriction of DenseMultigridTransfer available */
+    using DenseMultigridTransfer< VectorType, BitVectorType, MatrixType >::restrict;
+
+    /** \brief Prolong a coarse grid vector f and store the result in the fine grid vector t
+     *
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    void prolong(const VectorType& f, VectorType &t, const BitVectorType& critical) const;
+
+    /** \brief Make the untruncated prolongation of DenseMultigridTransfer available */
+    using DenseMultigridTransfer< VectorType, BitVectorType, MatrixType >::prolong;
+
+    /** \brief Assemble a coarse stiffness matrix by Galerkin restriction
+     *
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    void galerkinRestrict(const MatrixType& fineMat, MatrixType& coarseMat, const BitVectorType& critical) const;
+
+    /** \brief Make the untruncated Galerkin restriction of DenseMultigridTransfer available */
+    using DenseMultigridTransfer< VectorType, BitVectorType, MatrixType >::galerkinRestrict;
+
+};
+
+
+#include "truncateddensemgtransfer.cc"
+
+#endif
diff --git a/dune-solvers/transferoperators/truncatedmgtransfer.hh b/dune-solvers/transferoperators/truncatedmgtransfer.hh
new file mode 100644
index 00000000..d6a21635
--- /dev/null
+++ b/dune-solvers/transferoperators/truncatedmgtransfer.hh
@@ -0,0 +1,73 @@
+#ifndef TRUNCATED_MG_TRANSFER_HH
+#define TRUNCATED_MG_TRANSFER_HH
+
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/bitsetvector.hh>
+
+/** \brief Restriction and prolongation interface class for truncated multigrid
+ *
+ * This class provides prolongation and restriction operators for truncated
+ * multigrid.  That means that certain fine grid degrees of freedom can be
+ * explicitly switched off.  This often leads to better coarse
+ * grid corrections when treating obstacle problems.  For an introduction
+ * to the theory of truncated multigrid see 'Adaptive Monotone Multigrid
+ * Methods for Nonlinear Variational Problems' by R. Kornhuber.
+ *
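+ * The interface is purely virtual, so truncated transfer operators can be
+ * used polymorphically; a sketch (the concrete type and variable names are
+ * illustrative assumptions):
+ * \code
+ *   TruncatedDenseMGTransfer<Vector> impl;
+ *   TruncatedMGTransfer<Vector>& transfer = impl;
+ *   transfer.restrict(fineResidual, coarseResidual, critical);
+ * \endcode
+ *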
+ * Currently only works for first-order Lagrangian elements!
+ */
+
+template<
+    class VectorType,
+    class BitVectorType = Dune::BitSetVector<VectorType::block_type::dimension>,
+    class MatrixType = Dune::BCRSMatrix< typename Dune::FieldMatrix<
+        typename VectorType::field_type, VectorType::block_type::size, VectorType::block_type::size> > >
+class TruncatedMGTransfer
+{
+
+    enum {blocksize = VectorType::block_type::size};
+
+    typedef typename VectorType::field_type field_type;
+
+public:
+
+    /** \brief Default constructor */
+    TruncatedMGTransfer() : recompute_(NULL)
+    {}
+
+    /** \brief Restrict a fine grid vector f and store the result in the coarse grid vector t
+     *
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    virtual void restrict(const VectorType& f, VectorType &t, const BitVectorType& critical) const = 0;
+
+    /** \brief Prolong a coarse grid vector f and store the result in the fine grid vector t
+     *
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    virtual void prolong(const VectorType& f, VectorType &t, const BitVectorType& critical) const = 0;
+
+    /** \brief Assemble a coarse stiffness matrix by Galerkin restriction
+     *
+     * \param critical Has to contain an entry for each degree of freedom.
+     *        Those dofs with a set bit are treated as critical.
+     */
+    virtual void galerkinRestrict(const MatrixType& fineMat, 
+                                  MatrixType& coarseMat, 
+                                  const BitVectorType& critical) const = 0;
+
+    /** \brief Bitfield specifying a subset of dofs which need to be recomputed
+     * when doing Galerkin restriction
+     *
+     * If this pointer is NULL it has no effect.  Otherwise it is expected
+     * to point to a bitfield with the size of the coarse function space.
+     * Then, when calling galerkinRestrict(), only those matrix entries which
+     * involve at least one dof with a set bit get recomputed.  Depending
+     * on the problem this can lead to considerable time savings.
+     */
+    const Dune::BitSetVector<1>* recompute_;
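+
+    /* A usage sketch for recompute_ (illustrative; 'transfer' stands for an
+     * instance of a derived class, the other names are assumptions):
+     *
+     *   Dune::BitSetVector<1> changed(coarseSize, false);
+     *   changed[modifiedCoarseDof] = true;
+     *   transfer.recompute_ = &changed;
+     *   transfer.galerkinRestrict(fineMatrix, coarseMatrix, critical);
+     *   transfer.recompute_ = NULL;   // recompute everything next time
+     */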
+};
+
+#endif
-- 
GitLab