Commit f9897a89 authored by oliver.sander_at_tu-dresden.de's avatar oliver.sander_at_tu-dresden.de
Browse files

Merge branch 'add-sumfunctionalconstrainedlinearization' into 'master'

Add SumFunctionalConstrainedLinearization class

See merge request !20
parents 78660533 0eb783be
Pipeline #32283 passed with stage
in 3 minutes and 8 seconds
......@@ -12,5 +12,6 @@ install(FILES
quadraticfunctionalconstrainedlinearization.hh
stronglyconvexfunctional.hh
sumfunctional.hh
sumfunctionalconstrainedlinearization.hh
zerofunctional.hh
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/tnnmg/functionals)
#ifndef DUNE_TNNMG_SUMFUNCTIONALCONSTRAINEDLINEARIZATION_HH
#define DUNE_TNNMG_SUMFUNCTIONALCONSTRAINEDLINEARIZATION_HH
#include <tuple>
#include <dune/common/hybridutilities.hh>
#include <dune/solvers/common/resize.hh>
namespace Dune::TNNMG
{
/**
* \brief A constrained linearization for a sum of functionals
*
* \tparam F The SumFunctional to be linearized
* \tparam BV Bit vector types for the degrees of freedom to be ignored
* \tparam Addends A list of ConstrainedLinearization implementations,
* corresponding to the sum functional F
*/
template<class F, class BV, class... Addends>
class SumFunctionalConstrainedLinearization
{
public:
// The matrix type is taken from the first addend of the sum functional;
// this implicitly assumes all addends share a common matrix type.
using Matrix = typename std::tuple_element_t<0,typename F::Functions>::Matrix;
using Vector = typename F::Vector;
using BitVector = BV;
// Truncation is performed in place (entries are zeroed rather than the
// containers being shrunk), so the "constrained" types coincide with the
// unconstrained ones.
using ConstrainedVector = Vector;
using ConstrainedMatrix = Matrix;
using ConstrainedBitVector = BitVector;
/** \brief Constructor
 *
 * Builds one Addend linearization per addend functional of f, by applying
 * the Addend constructors element-wise to the tuple f.functions().
 *
 * \param f The sum functional to be linearized
 * \param ignore Dofs flagged here are always truncated; stored by
 *        reference, so the caller must keep this object alive for the
 *        lifetime of the linearization.
 */
SumFunctionalConstrainedLinearization(const F& f, const BitVector& ignore)
: addends_(std::apply([&](auto&&... fi) {
return std::make_tuple(Addends(fi, ignore)...);
}, f.functions())),
ignore_(ignore)
{}
/** \brief Pre-compute derivative information at the configuration x
 *
 * Binds every addend at x, then accumulates the negative gradients and
 * Hesse matrices of all addends, and finally truncates both according to
 * the combined truncation flags (the constructor-supplied 'ignore' set
 * plus every dof any addend wants truncated).
 */
void bind(const Vector& x)
{
// Let each addend compute its own derivative information first.
Hybrid::forEach(addends_, [&](auto&& addend) {
addend.bind(x);
});
// Compute first derivatives of the functionals
// (sum of the addends' negative gradients, starting from zero)
Solvers::resizeInitializeZero(negativeGradient_, x);
Hybrid::forEach(addends_, [&](auto&& addend) {
negativeGradient_ += addend.negativeGradient();
});
// Compute second derivatives of the functionals.
// The first addend is assigned (which also sets the sparsity pattern /
// size, avoiding a separate zero-initialization); the remaining addends
// are accumulated via a compile-time index range starting at 1.
hessian_ = std::get<0>(addends_).hessian();
Hybrid::forEach(Hybrid::integralRange(std::integral_constant<int, 1>(),Hybrid::size(addends_)), [&](auto i) {
hessian_ += std::get<i>(addends_).hessian();
});
////////////////////////////////////////////////////
// Determine which dofs to truncate
////////////////////////////////////////////////////
// Truncate all degrees of freedom explicitly requested in the constructor
truncationFlags_ = ignore_;
// Also truncate a degree of freedom if one of the addends wants it truncated
for (std::size_t i=0; i<truncationFlags_.size(); i++)
Hybrid::forEach(addends_, [&](auto&& addend) {
truncationFlags_[i] |= addend.truncated()[i];
});
/////////////////////////////////////////////////////
// Do the actual truncation
/////////////////////////////////////////////////////
for (std::size_t i=0; i<x.size(); ++i)
{
// Truncate the gradient
for (std::size_t j=0; j<x[i].size(); ++j)
if (truncationFlags_[i][j])
negativeGradient_[i][j] = 0;
// Truncate the Hesse matrix: zero every row and column belonging to a
// truncated dof.  'entry' is the block at row i, column l of the
// sparse matrix; j,k index the scalar entries within that block.
for (auto&& [entry, l] : sparseRange(hessian_[i]))
for (std::size_t j=0; j<x[i].size(); ++j)
for (std::size_t k=0; k<x[i].size(); ++k)
if (truncationFlags_[i][j] || truncationFlags_[l][k])
{
entry[j][k] = 0;
#ifndef GAUSSSEIDEL
// Put a 1 on the diagonal of truncated dofs, presumably to keep
// the truncated matrix regular for direct/global solvers
// (unnecessary for Gauss-Seidel type smoothers) -- TODO confirm.
if (j==k && i == l)
entry[j][k] = 1;
#endif
}
}
}
/** \brief Fill the truncated degrees of freedom in a vector with zeros
 *
 * \param cv Input vector
 * \param[out] v Output vector: will be a copy of cv, with the truncated
 * degrees of freedom overwritten by zero
 *
 * \note v must already have the same block structure as cv; it is not
 *       resized here.  Only call this after a call to bind()!
 */
void extendCorrection(const ConstrainedVector& cv, Vector& v) const
{
for (std::size_t i=0; i<v.size(); ++i)
for (std::size_t j=0; j<v[i].size(); ++j)
v[i][j] = (truncationFlags_[i][j]) ? 0 : cv[i][j];
}
/** \brief Access to which degrees of freedom have been truncated
 *
 * Only meaningful after a call to bind()!
 */
const BitVector& truncated() const
{
return truncationFlags_;
}
/** \brief The negative gradient of the sum functional
 *
 * Only call this after a call to bind()!
 */
const auto& negativeGradient() const
{
return negativeGradient_;
}
/** \brief The Hesse matrix of the sum functional
 *
 * Only call this after a call to bind()!
 */
const auto& hessian() const
{
return hessian_;
}
private:
// The ConstrainedLinearization objects that are being summed
std::tuple<Addends...> addends_;
// Mark degrees of freedom that should be truncated
// (reference to caller-owned storage -- see constructor note)
const BitVector& ignore_;
// Accumulated negative gradient of all addends; valid after bind()
Vector negativeGradient_;
// Accumulated Hesse matrix of all addends; valid after bind()
Matrix hessian_;
// Union of ignore_ and the addends' own truncation flags; valid after bind()
BitVector truncationFlags_;
};
} // namespace Dune::TNNMG
#endif // DUNE_TNNMG_SUMFUNCTIONALCONSTRAINEDLINEARIZATION_HH
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment