Commit a01486dd authored by Lasse Hinrichsen

Clean up and document

parent b1339a85
Pipeline #37091 failed with stage in 8 minutes and 41 seconds
@@ -21,6 +21,8 @@ dot(const V& v, const V& w)
return v * w;
}
/** Returns an object with the same structure as the input, with all entries set to 0.
*/
template<typename V>
auto
zeroCopy(const V& v)
@@ -38,16 +40,19 @@ void
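/** Applies the scalar function f to each scalar entry of v and stores the
* result in the corresponding entry of u, recursing through nested (hybrid)
* containers until plain numbers are reached.
*/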
applyComponentwise(F&& f, V& u, const V& v)
{
namespace H = Dune::Hybrid;
H::ifElse(
Dune::IsNumber<std::decay_t<V>>(),
[&](auto&& id) { u = f(id(v)); },
[&](auto id) {
H::forEach(H::integralRange(H::size(id(v))), [&](auto i) {
applyComponentwise(f, H::elementAt(u, i), H::elementAt(v, i));
});
if constexpr (Dune::IsNumber<std::decay_t<V>>()) {
u = f(v);
} else {
H::forEach(H::integralRange(H::size(v)), [&](auto i) {
applyComponentwise(f, H::elementAt(u, i), H::elementAt(v, i));
});
}
}
/** For each coordinate i, compute the maximal interval for t such that
* phi(x_i + t * c_i) is still well defined.
* The result is the intersection of these intervals over all i.
*/
template<typename F, typename V, typename S>
void
directionalDomain(const V& origin,
@@ -77,6 +82,10 @@ directionalDomain(const V& origin,
}
}
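// A minimal sketch of the per-coordinate computation described above, assuming
// phi has a one-dimensional domain [lo, hi]; the helper name
// coordinateInterval is made up for illustration and is not part of the
// library.
#include <algorithm>
#include <limits>
#include <dune/solvers/common/interval.hh>
template<typename K>
Dune::Solvers::Interval<K>
coordinateInterval(K x_i, K c_i, K lo, K hi)
{
  // c_i == 0: this coordinate places no restriction on t.
  if (c_i == K(0))
    return Dune::Solvers::Interval<K>(
      { std::numeric_limits<K>::lowest(), std::numeric_limits<K>::max() });
  // Otherwise solve lo <= x_i + t * c_i <= hi for t; the endpoints swap when
  // c_i is negative.
  const K a = (lo - x_i) / c_i;
  const K b = (hi - x_i) / c_i;
  return Dune::Solvers::Interval<K>({ std::min(a, b), std::max(a, b) });
}
// directionalDomain then intersects such intervals over all coordinates i.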
/** Computes the subdifferential of
* \sum_i w_i * phi(x_i + t * d_i)
* and accumulates the result into `interval`, which is passed by reference.
*/
template<typename F, typename V, typename S>
void
addSubdiffs(const V& origin,
@@ -104,6 +113,8 @@ addSubdiffs(const V& origin,
}
}
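// Worked out for a single index i (matching the comment above): by the chain
// rule, the contribution of index i to the subdifferential in t is
//   w_i * d_i * \partial phi(x_i + t * d_i).
// If \partial phi(x_i + t * d_i) is the interval [l, u], scaling by the factor
// w_i * d_i gives [w_i * d_i * l, w_i * d_i * u] for w_i * d_i >= 0, with the
// endpoints swapped otherwise; presumably addSubdiffs accumulates these scaled
// intervals into `interval` over all i.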
/** Returns an interval of maximal length, i.e. the whole real line.
*/
auto
reals()
{
@@ -121,6 +132,12 @@ computeFunctionalValue(const V& weights, V pos, const F& phi)
}
}
/** Direction-restricted semilinear functional.
*
* More precisely, we have a functional of the form
* J_c(t) = \sum_i w_i * phi(x_i + t * c_i),
* where x is the supplied origin and c the direction vector.
*/
template<typename V, typename ScalarFunction, typename R = double>
class SemilinearFunctionalDirectionalRestriction
{
@@ -138,19 +155,21 @@ public:
, phi_(std::forward<FF>(phi))
{}
Range operator()(const Range& t) const
/** Evaluate the functional.
*
* TODO: Implement me.
*/
Range operator()(const Range&) const
{
// This is of course a PITA having to copy vectors
// every single time. Well, what can you do...
// TODO: origin_ part is missing!
DUNE_THROW(Dune::Exception, "Dont use this yet");
auto val = direction_;
val *= t;
return Impl::computeFunctionalValue(weights_, std::move(val), phi_);
DUNE_THROW(Dune::NotImplemented, "No operator() yet");
}
/** Returns the domain of the functional, i.e.
* an interval where the functional is defined.
*
* TODO: What if the domain is not just an interval?
*
*/
auto domain() const
{
auto dom = Impl::reals();
@@ -159,17 +178,13 @@ public:
return dom;
}
/** Returns the subdifferential at a certain point t, i.e.
* \partial \sum_i w_i * phi(x_i + t*c_i),
* which is
* \sum_i w_i * \partial phi(x_i + t*c_i)*c_i.
*/
auto subDifferential(double t) const
{
// TODO:
/*
Okay, we need to compute the subdifferential of
\sum_i w_i * phi(x_i + t*c_i)
in t.
This should be
\sum_i w_i * \partial phi(x_i + t*c_i)*c_i, right?
*/
auto sd = Dune::Solvers::Interval<double>({ 0., 0. });
Impl::addSubdiffs(origin_, direction_, weights_, phi_, t, sd);
@@ -187,6 +202,33 @@ private:
ScalarFunction phi_;
};
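// A minimal usage sketch; inspectRestriction is a made-up helper name and the
// constructor call mirrors the one used by SemilinearFunctional further below.
template<typename V, typename Phi>
void
inspectRestriction(const V& weights, const V& origin, const V& direction, const Phi& phi)
{
  // J_c(t) = \sum_i w_i * phi(x_i + t * c_i)
  auto J = SemilinearFunctionalDirectionalRestriction<V, Phi>(
    weights, origin, direction, phi);
  // Interval of t on which every phi(x_i + t * c_i) is defined.
  auto dom = J.domain();
  // Subdifferential at t = 0, i.e. \sum_i w_i * c_i * \partial phi(x_i).
  auto sd = J.subDifferential(0.0);
  // operator() still throws Dune::NotImplemented, so the value itself is not
  // evaluated here.
  (void)dom;
  (void)sd;
}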
/** A semilinear functional of the form
* J(u) = \sum w_i phi(u_i).
*
* Here, w_i are supposed to be suitable weights (e.g. from mass lumping).
*
* Phi is expected to be a convex, lower semicontinuous (lsc.) and proper scalar
* function that models the following interface (a minimal example model is
* sketched after this comment):
* (TODO: Maybe actually check this with concepts?)
*
* == Member functions ==
* - domain() - should return a Dune::Solvers::Interval stating the domain.
* (TODO: Maybe rather free function?)
* - subDifferential(double t) - Returns a Solvers::Interval representing the
* subdifferential at t.
* - shouldTruncate(double t) - Boolean function that specifies if the values at
* this position should be truncated.
*
* == Free functions ==
* - derivative(phi) - should return a function with operator() representing
* the derivative.
* - derivative(derivative(phi)) - should return a function with operator()
* representing the second derivative.
*
* ------------------------------------------------------------------------------
*
* If you want to include other functionals, e.g. a quadratic part, use
* SumFunctional.
*/
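// For illustration only: a minimal, hypothetical model of such a scalar
// function, a smooth quadratic phi(t) = 0.5 * a * t^2 defined on all of R.
// The names QuadraticPhi, QuadraticPhiDerivative and
// QuadraticPhiSecondDerivative are made up and not part of the library; the
// Interval construction mirrors the pattern used elsewhere in this file.
#include <limits>
#include <dune/solvers/common/interval.hh>

struct QuadraticPhiSecondDerivative
{
  double a;
  double operator()(double) const { return a; } // phi''(t) = a
};

struct QuadraticPhiDerivative
{
  double a;
  double operator()(double t) const { return a * t; } // phi'(t) = a * t
  friend auto derivative(const QuadraticPhiDerivative& d)
  {
    return QuadraticPhiSecondDerivative{ d.a };
  }
};

class QuadraticPhi
{
public:
  explicit QuadraticPhi(double a = 1.0) : a_(a) {}

  double operator()(double t) const { return 0.5 * a_ * t * t; }

  // Domain: the whole real line.
  Dune::Solvers::Interval<double> domain() const
  {
    return Dune::Solvers::Interval<double>(
      { std::numeric_limits<double>::lowest(),
        std::numeric_limits<double>::max() });
  }

  // phi is differentiable, so the subdifferential is the singleton { a * t }.
  Dune::Solvers::Interval<double> subDifferential(double t) const
  {
    return Dune::Solvers::Interval<double>({ a_ * t, a_ * t });
  }

  // A smooth, everywhere finite function never needs truncation.
  bool shouldTruncate(double) const { return false; }

  friend auto derivative(const QuadraticPhi& phi)
  {
    return QuadraticPhiDerivative{ phi.a_ };
  }

private:
  double a_;
};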
template<typename V, typename ScalarFunction, typename R = double>
class SemilinearFunctional
{
@@ -224,7 +266,8 @@ public:
Range operator()(Vector x) const
{
// TODO copying x is expensive. We should supply both vectors to the compute
// TODO copying x is expensive.
// Maybe we can supply both vectors to the compute
// function.
x += origin_;
return Impl::computeFunctionalValue(weights_, std::move(x), phi_);
@@ -232,7 +275,7 @@ public:
friend auto shift(const SemilinearFunctional& f, const Vector& origin)
{
auto new_origin = f.origin_;
auto new_origin = autoCopy(f.origin_);
new_origin += origin;
return SemilinearFunctional(f.weights_, new_origin, f.phi_);
}
@@ -241,11 +284,11 @@ public:
const Vector& origin,
const Vector& direction)
{
// TODO What happens if f already has a nontrivial origin? And if so,
// what SHOULD happen?
auto new_origin = autoCopy(f.origin_);
new_origin += origin;
return SemilinearFunctionalDirectionalRestriction<decltype(f.weights_),
decltype(f.phi_)>(
f.weights_, origin, direction, f.phi_);
f.weights_, new_origin, direction, f.phi_);
}
template<typename Index>
@@ -255,8 +298,7 @@ public:
using BT = std::decay_t<decltype(f.weights_[0])>;
// TODO: This will of course leave out the constant contributions in the
// other components. But since the minimization is independent from these,
// why bother? The coupling between components should be handled in the
// quadratic part.
// why bother?
return SemilinearFunctional<BT, decltype(f.phi_)>(
f.weights_[i], f.origin_[i], f.phi_);
}
@@ -273,6 +315,7 @@ private:
};
template<typename V, typename ScalarFunction>
SemilinearFunctional(const V& weights, ScalarFunction&& phi) -> SemilinearFunctional<std::decay_t<V>, std::decay_t<ScalarFunction>>;
SemilinearFunctional(const V& weights, ScalarFunction&& phi)
-> SemilinearFunctional<std::decay_t<V>, std::decay_t<ScalarFunction>>;
}
@@ -60,6 +60,8 @@ determineTruncation(const NV& x, NBV&& truncationFlags, const F& function)
}
}
// See also:
// https://git.imp.fu-berlin.de/agnumpde/dune-tnnmg/-/merge_requests/17
template<class NV, class NBV>
void
truncateVector(NV& x, const NBV& truncationFlags)
@@ -75,6 +77,8 @@ truncateVector(NV& x, const NBV& truncationFlags)
}
}
// See also:
// https://git.imp.fu-berlin.de/agnumpde/dune-tnnmg/-/merge_requests/17
template<class NM, class RBV, class CBV>
void
truncateMatrix(NM& A,
@@ -100,7 +104,24 @@ truncateMatrix(NM& A,
}
/** A constrained linearization for the SemilinearFunctional */
/** A constrained linearization for the SemilinearFunctional.
*
* Note that since we do not know the right matrix structure in general
* (sparsity pattern, specific interface and so on), you need to supply a
* dummy matrix which will be copied and overwritten as needed. This can be done
* via the constructor or the method initializeMatrix(...). The easiest way
* would be to just use your stiffness matrix as a dummy. Note, however, that
* the sparsity pattern is likely larger than needed in that case.
*
* Another important thing to know is that the TNNMGStep generates the
* linearization from the functional and the ignore nodes. Hence, you will want
* to initialize the matrix of the linearization stored by the TNNMGStep, along
* the lines of:
*
* auto functional = SemilinearFunctional(...);
* auto linearization = std::make_shared<SemilinearFunctionalConstrainedLinearization<...>>(functional, ignore, matrix);
* auto step = TNNMGStep(functional, ...);
* step.setLinearization(linearization);
*/
template<typename F,
typename ScalarFunction,
typename M,
@@ -190,7 +190,6 @@ solveSystem(V const& u_old, M const& matrix, Grid const& grid)
using namespace Dune;
auto u = u_old;
// u = 0.2; // for the lulz DEBUG
// set up ignore vector. We have no Dirichlet nodes, hence all false.
auto ignore = Solvers::DefaultBitVector_t<V>(u_old.size(), false);
@@ -206,7 +205,6 @@ solveSystem(V const& u_old, M const& matrix, Grid const& grid)
}
// set up functionals
///* DEBUG: Use obstacle potential
auto phi = LogPotential();
auto quadratic = TNNMG::QuadraticFunctional<M, V, double>(matrix, u_old);
@@ -235,39 +233,20 @@ solveSystem(V const& u_old, M const& matrix, Grid const& grid)
auto functional_linearization = std::make_shared<Linearization>(
std::make_tuple(quad_lin, semi_lin), ignore);
// */ // End Debug obstacle
// DEBUG OBSTACLE
// V lower(u.size());
// V upper(u.size());
// lower = -1;
// upper = 1;
// using Functional =
// TNNMG::BoxConstrainedQuadraticFunctional<M, V, V, V, double>;
// auto functional = Functional(matrix, u_old, lower, upper);
// using Linearization =
// TNNMG::BoxConstrainedQuadraticFunctionalConstrainedLinearization<Functional,
// BV>;
// using DefectProjection = TNNMG::ObstacleDefectProjection;
// using LineSearchSolver = TNNMG::ScalarObstacleSolver;
// END DEBUG OBSTACLE
// set up TNNMGStep
using LocalSolver = TNNMG::ScalarBisectionSolver;
auto ls = LocalSolver();
// auto ls = TNNMG::ScalarObstacleSolver();
auto gs = gaussSeidelLocalSolver(ls);
using GS = decltype(gs);
using Smoother = TNNMG::NonlinearGSStep<decltype(functional), GS>;
auto proj = TNNMG::SemilinearFunctionalDefectProjection();
// auto linesearch = ls; // probably dont even need to copy
auto linesearch = LocalSolver(1.0, 0.8);
auto smoother = std::make_shared<Smoother>(functional, u, gs);
smoother->setIgnore(ignore);
@@ -280,34 +259,23 @@ solveSystem(V const& u_old, M const& matrix, Grid const& grid)
decltype(linesearch)>;
auto step = Step(functional, u, smoother, mg, 1, proj, linesearch);
// using Step = TNNMG::TNNMGStep<Functional,
// BV,
// Linearization,
// DefectProjection,
// LineSearchSolver>;
// auto step = Step(
// functional, u, smoother, mg, 1, DefectProjection(), LineSearchSolver());
step.setIgnore(ignore);
step.setLinearization(functional_linearization);
// solve
/// solve algebraic problem
using Solver = Dune::Solvers::LoopSolver<V, BV>;
// TODO: make these available
int iter = 100;
double tol = 1e-12;
using Norm = Solvers::EnergyNorm<M, V>;
// TODO: dont assemble each time
auto stiff = stiffness(grid.leafGridView());
auto norm = std::make_shared<Norm>(stiff);
auto solver = Solver(step, iter, tol, norm, Solver::FULL);
// auto solver = Solver(smoother, iter, tol, norm, Solver::FULL);
///* Only relevant for actual TNNMG
solver.addCriterion(
[&]() {
return Dune::formatString(" % 12.5e", step.lastDampingFactor());
@@ -320,7 +288,7 @@ solveSystem(V const& u_old, M const& matrix, Grid const& grid)
step.linearization().truncated().count());
},
" truncated ");
// */
solver.preprocess();
solver.solve();
@@ -395,7 +363,7 @@ main(int argc, char** argv)
}
auto solutions = std::vector<Vector>(num_timesteps);
solutions[0] = u0; // TODO: This is not the function but the functional.
solutions[0] = u0; // Careful: In step 0, this is not the function but the functional.
for (int timestep = 1; timestep < num_timesteps; ++timestep) {
auto u_old = Vector();